"""Tools for parsing a regular expression into a Pattern."""
import collections
import string
import charsource
import pattern as p
# Characters that represent themselves in a regular expression.
# TODO(jasonpr): Handle $ and ^ specially at edges of regex.
_CHAR_LITERALS = string.ascii_letters + string.digits + '!"#$%&\',-/:;<=>@^_`~]} \t\n\r'
# Characters that represent themselves inside a square-bracket expression.
_GROUP_CHARS = string.ascii_letters + string.digits + '!"#$%&\'()*+,-./:;<=>?@[^_`{|}~'
# Characters that represent themselves when escaped with a backslash.
_IDENTITY_ESCAPES = r'.[\()*+?{|'
# Characters that represent a character class when escaped with a backslash.
_CHARACTER_CLASSES = {
'd': string.digits,
'w': string.ascii_letters + string.digits + '_',
'h': string.hexdigits,
# TODO(jasonpr): Make an informed decision, rather than blindly
    # inheriting this definition from Python.
's': string.whitespace,
}
_BRACKET_CHARACTER_CLASSES = {
'alnum': set(string.ascii_letters + string.digits),
'alpha': set(string.ascii_letters),
'digit': set(string.digits),
'lower': set(string.ascii_lowercase),
'print': set(string.printable),
'punct': set(string.punctuation),
# TODO(jasonpr): Make an informed decision, rather than blindly
    # inheriting this definition from Python.
'space': set(string.whitespace),
'upper': set(string.ascii_uppercase),
'xdigit': set(string.hexdigits),
}
def parse_regex(regex_string):
"""Convert a regular expression string into a Pattern."""
return _parse_regex(charsource.GetPutSource(regex_string))
# The following _parse_* methods form a recursive descent parser
# that respects the order of operations in a regular expression.
def _parse_regex(source):
"""Parse any regex into a Pattern."""
return _parse_alternation(source)
def _parse_alternation(source):
"""Parse an alternation expression, like 'ab|cd|ef'."""
parts = []
# Act as though the last character was a '|', so we get the
# initial element of the alternation.
last_char = '|'
while last_char == '|':
parts.append(_parse_concatenation(source))
last_char = source.get()
# Put back the non-alternation character.
source.put(last_char)
return p.Or(*parts)
def _parse_concatenation(source):
"""Parse a concatenation expression, like 'abc' or 'a(b|c)d*'."""
parts = []
duplication = _parse_duplication(source)
# If we're expecting a concatenation, there MUST be at least
# one (first) element!
assert duplication
while duplication:
parts.append(duplication)
duplication = _parse_duplication(source)
return p.Sequence(*parts)
def _parse_duplication(source):
"""Parse a duplication expression, like 'a*' or '(a|b){3,5}'."""
duplicated = _parse_parenthesization(source)
if not duplicated:
return None
duplicator = source.get()
if duplicator == '?':
return p.Maybe(duplicated)
elif duplicator == '*':
return p.Star(duplicated)
elif duplicator == '+':
return p.Plus(duplicated)
elif duplicator == '{':
min_repeats = _parse_positive_int(source)
range_continuation = source.get()
# We will ultimately expect a closing curly brace, but
# we might see a comma and a max repeats value, first.
if range_continuation == ',':
max_repeats = _parse_positive_int(source)
range_continuation = source.get()
else:
max_repeats = min_repeats
if range_continuation != '}':
raise ValueError('Expected "}", but got "%s".' %
range_continuation)
return p.Repeat(duplicated, min_repeats, max_repeats)
else:
source.put(duplicator)
return duplicated
def _parse_parenthesization(source):
"""Parse a parenthesization pattern, like '(a|b)' or '[ab]' or 'a'.
Note that '[ab]' is a parenthesization, since it is equivalent
to '([ab])'. Similarly, 'a' is equivalent to '(a)'.
"""
first_char = source.get()
if first_char == '(':
enclosed_regex = _parse_regex(source)
close_paren = source.get()
assert close_paren == ')'
return enclosed_regex
# Otherwise, this must just be a group. (Groups have just as
# tight of binding as a parenthesization.)
source.put(first_char)
return _parse_group(source)
def _parse_group(source):
"""Parse a group pattern, like '[abc]' or 'a'.
Note that 'a' is a group, since 'a' is equivalent to '[a]'.
"""
first_char = source.get()
if first_char == '[':
second_char = source.get()
if second_char == '^':
negating = True
else:
source.put(second_char)
negating = False
group_chars = _parse_group_chars(source)
result = p.Selection(group_chars, negating)
close_brace = source.get()
assert close_brace == ']'
return result
# Otherwise, it's a single normal character.
source.put(first_char)
return _parse_atom(source)
def _parse_group_chars(source):
"""Parse the characters from a group specification.
This is just a string of characters allowable in a group specification.
For example, a valid parse is 'aA1.?', since '[aA1.?]' is a valid group.
"""
chars = set()
while True:
range_chars = _parse_group_range(source)
if range_chars:
for char in range_chars:
chars.add(char)
continue
char_class = _parse_char_class(source)
if char_class:
chars |= char_class
continue
char = source.get()
if not char:
raise ValueError('Unexpected end of stream.')
if char not in _GROUP_CHARS:
source.put(char)
break
chars.add(char)
return ''.join(chars)
def _parse_atom(source):
"""Parse a single regex atom.
An atom is a period ('.'), a character literal, or an escape sequence.
"""
char = source.get()
if not char:
# For good measure, put the EOF back on!
# This doesn't really do anything, since the source will
# generate EOFs forever.
source.put(char)
return None
elif char == '.':
return p.Anything()
elif char in _CHAR_LITERALS:
return p.String(char)
elif char == '\\':
escaped = source.get()
        if escaped in _IDENTITY_ESCAPES:
return p.String(escaped)
elif escaped in _CHARACTER_CLASSES:
return p.Selection(_CHARACTER_CLASSES[escaped])
else:
            raise ValueError('Unexpected escape sequence, \\%s.' % escaped)
else:
source.put(char)
return None
def _parse_positive_int(source):
"""Parse a positive integer.
That is, parse a sequence of one or more digits.
"""
digits = []
next_char = source.get()
assert next_char and next_char in string.digits
while next_char and next_char in string.digits:
digits.append(next_char)
next_char = source.get()
source.put(next_char)
return int(''.join(digits))
def _parse_group_range(source):
"""Parse a three-character group range expression.
Return the set of characters represented by the range.
For example, parsing the expression 'c-e' from the source returns
set(['c', 'd', 'e']).
"""
start = source.get()
if start not in _GROUP_CHARS:
source.put(start)
return None
middle = source.get()
if middle != '-':
source.put(middle)
source.put(start)
return None
end = source.get()
if end not in _GROUP_CHARS:
source.put(end)
source.put(middle)
source.put(start)
return None
range_chars = set()
for ascii_value in range(ord(start), ord(end) + 1):
range_chars.add(chr(ascii_value))
return range_chars
def _parse_char_class(source):
for class_name, class_contents in _BRACKET_CHARACTER_CLASSES.iteritems():
        if _parse_verbatim(source, '[:%s:]' % class_name):
            # A '[:name:]' class was consumed; hand back its character set.
            return class_contents
    return None
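# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It assumes the rest
# of the module exists as imported above (charsource.GetPutSource, the
# `pattern` module, and helpers such as _parse_verbatim that are not shown in
# this excerpt); the regex and the _demo name are illustrative.
def _demo_parse_regex():
    """Parse a small regex and return the resulting Pattern object."""
    # '(ab|cd)+x{2,3}' exercises alternation, concatenation, '+' duplication
    # and the '{m,n}' repeat syntax handled by _parse_duplication().
    return parse_regex('(ab|cd)+x{2,3}')
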
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 8 14:27:22 2016
@author: Viktor
"""
import numpy as np
from sklearn.datasets import fetch_mldata
from matplotlib import pyplot as plt
from skimage.io import imread
from skimage.io import imshow
from skimage.morphology import opening, closing
from scipy import ndimage
from sklearn.neighbors import KNeighborsClassifier
# load the MNIST dataset
mnist = fetch_mldata('MNIST original')
print(mnist.data.shape)
print(mnist.target.shape)
print(np.unique(mnist.target))
img = 255-mnist.data[12345]
img = img.reshape(28,28)
plt.imshow(-img, cmap='Greys')
# read the dataset and store each digit class in a matrix for easier access
numbers = [0]*10
numbers[0] = mnist['data'][np.where(mnist['target'] == 0.)[0]]
numbers[1] = mnist['data'][np.where(mnist['target'] == 1.)[0]]
numbers[2] = mnist['data'][np.where(mnist['target'] == 2.)[0]]
numbers[3] = mnist['data'][np.where(mnist['target'] == 3.)[0]]
numbers[4] = mnist['data'][np.where(mnist['target'] == 4.)[0]]
numbers[5] = mnist['data'][np.where(mnist['target'] == 5.)[0]]
numbers[6] = mnist['data'][np.where(mnist['target'] == 6.)[0]]
numbers[7] = mnist['data'][np.where(mnist['target'] == 7.)[0]]
numbers[8] = mnist['data'][np.where(mnist['target'] == 8.)[0]]
numbers[9] = mnist['data'][np.where(mnist['target'] == 9.)[0]]
test = numbers[0][123]
res = numbers[0][123] == numbers[0][124]
percent_hit = np.count_nonzero(res) / 784.0
representative_number = [0]*10
for j in range(0,10):
representative_number[j] = np.zeros(np.shape(numbers[j][0]), dtype='float')
for i in range(0,len(numbers[j])):
representative_number[j] = representative_number[j] + numbers[j][i]
representative_number[j] = (representative_number[j])/len(numbers[j])
def processing(path):
img = imread(path)
gray = rgb2gray(img)
binary = 1 - (gray > 0.5)
binary = closing(binary)
binary = opening(binary)
labeled, nr_objects = ndimage.label(binary)
return nr_objects
def poklapanje(niz1, niz2):
mera_poklapanja = 0.0
for i in range(0,len(niz1)):
if(niz1[i]==niz2[i]):
mera_poklapanja = mera_poklapanja + 1
return mera_poklapanja/len(niz1)
def ucitavanje(path):
image_path = []
with open(path) as f:
data = f.read()
lines = data.split('\n')
for i, line in enumerate(lines):
if(i>1):
cols = line.split('\t')
if(cols[0]!=''):
image_path.append(cols[0])
f.close()
return image_path
def upis(path,image_path,result):
with open(path,'w') as f:
f.write('RA 1/2013 Viktor Sanca\n')
f.write('file\tsum\n')
for i in range(0,len(image_path)):
f.write(image_path[i]+'\t'+str(result[i])+'\n')
f.close()
def get_img(image_path):
img = imread(image_path)
gray = rgb2gray(img)
#gray = closing(gray)
#gray = opening(gray)
#binary = (gray < 0.5)
return gray
def binarize(img):
return img>1
def rgb2gray(img_rgb):
img_gray = np.ndarray((img_rgb.shape[0], img_rgb.shape[1]))
img_gray = 0.8*img_rgb[:, :, 0] + 0.2*img_rgb[:, :, 1] + 1*img_rgb[:, :, 2]
img_gray = img_gray.astype('uint8')
return img_gray
def mark_indices(image):
    """Return start indices of candidate 28x28 windows in a 640x480 image.

    A window is marked when all four of its corner pixels are dark (< 10).
    """
    starting_indices = []
    img = image.reshape(640*480)
    # i is the flat index of the window's top-left pixel; the other corners
    # are 27 pixels to the right, 27 rows down, and 27 rows down + 27 right.
    for i in range(0,(640)*(480-28)):
        if(img[i]<10 and img[i+27]<10 and img[i+27*(640)]<10 and img[i+27*(640)+27]<10):
            starting_indices.append(i)
    return starting_indices
def get_image_from_indice(image,start_indice):
image28_28 = np.empty((28*28),dtype='uint8')
img = image.reshape(640*480)
for i in range(0,28):
for j in range(0,28):
image28_28[28*i+j]=img[start_indice+i*(640)+j]
return image28_28
def find_number(image28_28):
mmx = [0]*10
for i in range(0,10):
for j in range(0,len(numbers[i])):
res = binarize(image28_28) == binarize(numbers[i][j])
if(np.count_nonzero(res)>mmx[i]):
mmx[i]=np.count_nonzero(res)
return max_idx(mmx)
def max_idx(lista):
mx = max(lista)
for i in range(0,len(lista)):
if(lista[i]==mx):
return i
return -1
image_path = []
result = []
in_path = 'level-1-mnist-train/level-1-mnist/out.txt'
out_path = 'level-1-mnist-test/level-1-mnist-test/out.txt'
train_path = 'level-1-mnist-train/level-1-mnist/'
test_path = 'level-1-mnist-test/level-1-mnist-test/'
image_paths = ucitavanje(out_path)
#knn = KNeighborsClassifier()
knn = KNeighborsClassifier(n_neighbors=2000,weights='distance',algorithm='auto',n_jobs=-1)
knn.fit(mnist.data,mnist.target)
suma = [0]*len(image_paths)
for i in range(0,len(image_paths)):
print('Image'+str(i+1)+'/'+str(len(image_paths)))
img = get_img(test_path+image_paths[i])
start_indices = mark_indices(img.reshape(640*480))
for start_indice in start_indices:
img_d = get_image_from_indice(img,start_indice)
#nr = find_number(img_d)
nr = knn.predict(img_d)
suma[i] = suma[i] + nr[0]
suma[i] = int(suma[i])
for i in range(0,len(suma)):
suma[i] = float(suma[i])
upis(out_path, image_paths, suma)
image28_28 = img_d
mmx = [0]*10
for i in range(0,10):
for j in range(0,len(numbers[i])):
res = image28_28 == numbers[i][j]
if(np.count_nonzero(res)>mmx[i]):
mmx[i]=np.count_nonzero(res)
total = np.zeros(784, dtype='float')
for i in range(0,10):
total = total + representative_number[i]
img = representative_number[4]
img = img.reshape(28,28)
plt.imshow(img, cmap='Greys')
check = numbers[5][123]
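# ---------------------------------------------------------------------------
# Hedged sketch (not in the original script): one way to reuse the
# representative_number mean images computed above as a simple nearest-mean
# classifier for a single flattened 28x28 window. The function name and the
# L2 metric are illustrative choices, not the author's method.
def classify_by_mean(image28_28):
    """Return the digit whose mean image is closest (L2) to the window."""
    distances = [np.linalg.norm(image28_28.astype('float') - representative_number[d])
                 for d in range(0, 10)]
    return int(np.argmin(distances))
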
import gtk
import gobject
import pygame
import pygame.event
class _MockEvent(object):
def __init__(self, keyval):
self.keyval = keyval
class Translator(object):
key_trans = {
'Alt_L': pygame.K_LALT,
'Alt_R': pygame.K_RALT,
'Control_L': pygame.K_LCTRL,
'Control_R': pygame.K_RCTRL,
'Shift_L': pygame.K_LSHIFT,
'Shift_R': pygame.K_RSHIFT,
'Super_L': pygame.K_LSUPER,
'Super_R': pygame.K_RSUPER,
'KP_Page_Up' : pygame.K_KP9,
'KP_Page_Down' : pygame.K_KP3,
'KP_End' : pygame.K_KP1,
'KP_Home' : pygame.K_KP7,
'KP_Up' : pygame.K_KP8,
'KP_Down' : pygame.K_KP2,
'KP_Left' : pygame.K_KP4,
'KP_Right' : pygame.K_KP6,
'numbersign' : pygame.K_HASH,
'percent' : ord('%'),
'exclam' : pygame.K_EXCLAIM,
'asciicircum' : pygame.K_CARET,
'parenleft' : pygame.K_LEFTPAREN,
'parenright' : pygame.K_RIGHTPAREN,
'braceleft' : ord('{'),
'braceright' : ord('}'),
'bracketleft' : pygame.K_LEFTBRACKET,
'bracketright' : pygame.K_RIGHTBRACKET,
'apostrophe' : ord('\''),
'equal' : pygame.K_EQUALS,
'grave' : pygame.K_BACKQUOTE,
'Caps_Lock' : pygame.K_CAPSLOCK,
'Page_Up' : pygame.K_PAGEUP,
'Page_Down' : pygame.K_PAGEDOWN,
'Num_Lock' : pygame.K_NUMLOCK,
'Bar' : ord('|')
}
mod_map = {
pygame.K_LALT: pygame.KMOD_LALT,
pygame.K_RALT: pygame.KMOD_RALT,
pygame.K_LCTRL: pygame.KMOD_LCTRL,
pygame.K_RCTRL: pygame.KMOD_RCTRL,
pygame.K_LSHIFT: pygame.KMOD_LSHIFT,
pygame.K_RSHIFT: pygame.KMOD_RSHIFT,
}
def __init__(self, mainwindow, inner_evb):
"""Initialise the Translator with the windows to which to listen"""
self._mainwindow = mainwindow
self._inner_evb = inner_evb
# Enable events
# (add instead of set here because the main window is already realized)
self._mainwindow.add_events(
gtk.gdk.KEY_PRESS_MASK | \
gtk.gdk.KEY_RELEASE_MASK | \
gtk.gdk.VISIBILITY_NOTIFY_MASK
)
self._inner_evb.set_events(
gtk.gdk.POINTER_MOTION_MASK | \
gtk.gdk.POINTER_MOTION_HINT_MASK | \
gtk.gdk.BUTTON_MOTION_MASK | \
gtk.gdk.BUTTON_PRESS_MASK | \
gtk.gdk.BUTTON_RELEASE_MASK
)
self._mainwindow.set_flags(gtk.CAN_FOCUS)
self._inner_evb.set_flags(gtk.CAN_FOCUS)
# Callback functions to link the event systems
self._mainwindow.connect('unrealize', self._quit_cb)
self._mainwindow.connect('visibility_notify_event', self._visibility)
self._inner_evb.connect('key_press_event', self._keydown_cb)
self._inner_evb.connect('key_release_event', self._keyup_cb)
self._inner_evb.connect('button_press_event', self._mousedown_cb)
self._inner_evb.connect('button_release_event', self._mouseup_cb)
self._inner_evb.connect('motion-notify-event', self._mousemove_cb)
self._inner_evb.connect('expose-event', self._expose_cb)
self._inner_evb.connect('configure-event', self._resize_cb)
self._inner_evb.connect('screen-changed', self._screen_changed_cb)
# Internal data
self.__stopped = False
self.__keystate = [0] * 323
self.__button_state = [0,0,0]
self.__mouse_pos = (0,0)
self.__repeat = (None, None)
self.__held = set()
self.__held_time_left = {}
self.__held_last_time = {}
self.__held_last_value = {}
self.__tick_id = None
def hook_pygame(self):
pygame.key.get_pressed = self._get_pressed
pygame.key.set_repeat = self._set_repeat
pygame.mouse.get_pressed = self._get_mouse_pressed
pygame.mouse.get_pos = self._get_mouse_pos
def _visibility(self, widget, event):
if pygame.display.get_init():
pygame.event.post(pygame.event.Event(pygame.VIDEOEXPOSE))
return False
def _expose_cb(self, widget, event):
if pygame.display.get_init():
pygame.event.post(pygame.event.Event(pygame.VIDEOEXPOSE))
return True
def _resize_cb(self, widget, event):
evt = pygame.event.Event(pygame.VIDEORESIZE,
size=(event.width,event.height), width=event.width, height=event.height)
pygame.event.post(evt)
return False # continue processing
def _screen_changed_cb(self, widget, event):
if pygame.display.get_init():
pygame.event.post(pygame.event.Event(pygame.VIDEOEXPOSE))
def _quit_cb(self, data=None):
self.__stopped = True
pygame.event.post(pygame.event.Event(pygame.QUIT))
def _keydown_cb(self, widget, event):
key = event.hardware_keycode
keyval = event.keyval
if key in self.__held:
return True
else:
if self.__repeat[0] is not None:
self.__held_last_time[key] = pygame.time.get_ticks()
self.__held_time_left[key] = self.__repeat[0]
self.__held_last_value[key] = keyval
self.__held.add(key)
return self._keyevent(widget, event, pygame.KEYDOWN)
def _keyup_cb(self, widget, event):
key = event.hardware_keycode
if self.__repeat[0] is not None:
if key in self.__held:
# This is possibly false if set_repeat() is called with a key held
del self.__held_time_left[key]
del self.__held_last_time[key]
del self.__held_last_value[key]
self.__held.discard(key)
return self._keyevent(widget, event, pygame.KEYUP)
def _keymods(self):
mod = 0
for key_val, mod_val in self.mod_map.iteritems():
mod |= self.__keystate[key_val] and mod_val
return mod
def _keyevent(self, widget, event, type):
key = gtk.gdk.keyval_name(event.keyval)
if key is None:
# No idea what this key is.
return False
keycode = None
if key in self.key_trans:
keycode = self.key_trans[key]
elif hasattr(pygame, 'K_'+key.upper()):
keycode = getattr(pygame, 'K_'+key.upper())
elif hasattr(pygame, 'K_'+key.lower()):
keycode = getattr(pygame, 'K_'+key.lower())
elif key == 'XF86Start':
# view source request, specially handled...
self._mainwindow.view_source()
else:
print 'Key %s unrecognized' % key
if keycode is not None:
if type == pygame.KEYDOWN:
mod = self._keymods()
self.__keystate[keycode] = type == pygame.KEYDOWN
if type == pygame.KEYUP:
mod = self._keymods()
ukey = unichr(gtk.gdk.keyval_to_unicode(event.keyval))
if ukey == '\000':
ukey = ''
evt = pygame.event.Event(type, key=keycode, unicode=ukey, mod=mod)
self._post(evt)
return True
def _get_pressed(self):
return self.__keystate
def _get_mouse_pressed(self):
return self.__button_state
def _mousedown_cb(self, widget, event):
self.__button_state[event.button-1] = 1
widget.grab_focus()
return self._mouseevent(widget, event, pygame.MOUSEBUTTONDOWN)
def _mouseup_cb(self, widget, event):
self.__button_state[event.button-1] = 0
return self._mouseevent(widget, event, pygame.MOUSEBUTTONUP)
def _mouseevent(self, widget, event, type):
evt = pygame.event.Event(type, button=event.button, pos=(event.x, event.y))
self._post(evt)
return True
def _mousemove_cb(self, widget, event):
# From http://www.learningpython.com/2006/07/25/writing-a-custom-widget-using-pygtk/
# if this is a hint, then let's get all the necessary
# information, if not it's all we need.
if event.is_hint:
x, y, state = event.window.get_pointer()
else:
x = event.x
y = event.y
state = event.state
rel = (x - self.__mouse_pos[0], y - self.__mouse_pos[1])
self.__mouse_pos = (x, y)
self.__button_state = [
state & gtk.gdk.BUTTON1_MASK and 1 or 0,
state & gtk.gdk.BUTTON2_MASK and 1 or 0,
state & gtk.gdk.BUTTON3_MASK and 1 or 0,
]
evt = pygame.event.Event(pygame.MOUSEMOTION,
pos=self.__mouse_pos, rel=rel, buttons=self.__button_state)
self._post(evt)
return True
def _tick_cb(self):
cur_time = pygame.time.get_ticks()
for key in self.__held:
delta = cur_time - self.__held_last_time[key]
self.__held_last_time[key] = cur_time
self.__held_time_left[key] -= delta
if self.__held_time_left[key] <= 0:
self.__held_time_left[key] = self.__repeat[1]
self._keyevent(None, _MockEvent(self.__held_last_value[key]), pygame.KEYDOWN)
return True
def _set_repeat(self, delay=None, interval=None):
if delay is not None and self.__repeat[0] is None:
            self.__tick_id = gobject.timeout_add(10, self._tick_cb)
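# ---------------------------------------------------------------------------
# Hedged wiring sketch (not part of the original module, which is truncated
# here). It assumes the complete Translator class and a PyGTK 2 host
# application; the window/event-box setup below is illustrative.
def _demo_hook_translator():
    """Create a main window with an inner event box and attach a Translator."""
    main_window = gtk.Window(gtk.WINDOW_TOPLEVEL)
    event_box = gtk.EventBox()
    main_window.add(event_box)
    main_window.show_all()          # realize the window before wiring events
    translator = Translator(main_window, event_box)
    translator.hook_pygame()        # patch pygame.key/pygame.mouse query calls
    return translator
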
import json
import os
import sys
from datetime import datetime, timedelta
import wptserve
from wptserve import sslutils
from . import environment as env
from . import instruments
from . import mpcontext
from . import products
from . import testloader
from . import wptcommandline
from . import wptlogging
from . import wpttest
from mozlog import capture, handlers
from .font import FontInstaller
from .testrunner import ManagerGroup
here = os.path.dirname(__file__)
logger = None
"""Runner for web-platform-tests
The runner has several design goals:
* Tests should run with no modification from upstream.
* Tests should be regarded as "untrusted" so that errors, timeouts and even
crashes in the tests can be handled without failing the entire test run.
* For performance tests can be run in multiple browsers in parallel.
The upstream repository has the facility for creating a test manifest in JSON
format. This manifest is used directly to determine which tests exist. Local
metadata files are used to store the expected test results.
"""
def setup_logging(*args, **kwargs):
global logger
logger = wptlogging.setup(*args, **kwargs)
return logger
def get_loader(test_paths, product, debug=None, run_info_extras=None, chunker_kwargs=None,
test_groups=None, **kwargs):
if run_info_extras is None:
run_info_extras = {}
run_info = wpttest.get_run_info(kwargs["run_info"], product,
browser_version=kwargs.get("browser_version"),
browser_channel=kwargs.get("browser_channel"),
verify=kwargs.get("verify"),
debug=debug,
extras=run_info_extras,
enable_webrender=kwargs.get("enable_webrender"))
test_manifests = testloader.ManifestLoader(test_paths, force_manifest_update=kwargs["manifest_update"],
manifest_download=kwargs["manifest_download"]).load()
manifest_filters = []
include = kwargs["include"]
if kwargs["include_file"]:
include = include or []
include.extend(testloader.read_include_from_file(kwargs["include_file"]))
if test_groups:
include = testloader.update_include_for_groups(test_groups, include)
if include or kwargs["exclude"] or kwargs["include_manifest"] or kwargs["default_exclude"]:
manifest_filters.append(testloader.TestFilter(include=include,
exclude=kwargs["exclude"],
manifest_path=kwargs["include_manifest"],
test_manifests=test_manifests,
explicit=kwargs["default_exclude"]))
ssl_enabled = sslutils.get_cls(kwargs["ssl_type"]).ssl_enabled
h2_enabled = wptserve.utils.http2_compatible()
test_loader = testloader.TestLoader(test_manifests,
kwargs["test_types"],
run_info,
manifest_filters=manifest_filters,
chunk_type=kwargs["chunk_type"],
total_chunks=kwargs["total_chunks"],
chunk_number=kwargs["this_chunk"],
include_https=ssl_enabled,
include_h2=h2_enabled,
include_webtransport_h3=kwargs["enable_webtransport_h3"],
skip_timeout=kwargs["skip_timeout"],
skip_implementation_status=kwargs["skip_implementation_status"],
chunker_kwargs=chunker_kwargs)
return run_info, test_loader
def list_test_groups(test_paths, product, **kwargs):
env.do_delayed_imports(logger, test_paths)
run_info_extras = products.Product(kwargs["config"], product).run_info_extras(**kwargs)
run_info, test_loader = get_loader(test_paths, product,
run_info_extras=run_info_extras, **kwargs)
for item in sorted(test_loader.groups(kwargs["test_types"])):
print(item)
def list_disabled(test_paths, product, **kwargs):
env.do_delayed_imports(logger, test_paths)
rv = []
run_info_extras = products.Product(kwargs["config"], product).run_info_extras(**kwargs)
run_info, test_loader = get_loader(test_paths, product,
run_info_extras=run_info_extras, **kwargs)
for test_type, tests in test_loader.disabled_tests.items():
for test in tests:
rv.append({"test": test.id, "reason": test.disabled()})
print(json.dumps(rv, indent=2))
def list_tests(test_paths, product, **kwargs):
env.do_delayed_imports(logger, test_paths)
run_info_extras = products.Product(kwargs["config"], product).run_info_extras(**kwargs)
run_info, test_loader = get_loader(test_paths, product,
run_info_extras=run_info_extras, **kwargs)
for test in test_loader.test_ids:
print(test)
def get_pause_after_test(test_loader, **kwargs):
if kwargs["pause_after_test"] is None:
if kwargs["repeat_until_unexpected"]:
return False
if kwargs["headless"]:
return False
if kwargs["debug_test"]:
return True
tests = test_loader.tests
is_single_testharness = (sum(len(item) for item in tests.values()) == 1 and
len(tests.get("testharness", [])) == 1)
if kwargs["repeat"] == 1 and kwargs["rerun"] == 1 and is_single_testharness:
return True
return False
return kwargs["pause_after_test"]
def run_test_iteration(test_status, test_loader, test_source_kwargs, test_source_cls, run_info,
recording, test_environment, product, run_test_kwargs):
"""Runs the entire test suite.
This is called for each repeat run requested."""
tests = []
for test_type in test_loader.test_types:
tests.extend(test_loader.tests[test_type])
try:
test_groups = test_source_cls.tests_by_group(
tests, **test_source_kwargs)
except Exception:
logger.critical("Loading tests failed")
return False
logger.suite_start(test_groups,
name='web-platform-test',
run_info=run_info,
extra={"run_by_dir": run_test_kwargs["run_by_dir"]})
for test_type in run_test_kwargs["test_types"]:
logger.info(f"Running {test_type} tests")
browser_cls = product.get_browser_cls(test_type)
browser_kwargs = product.get_browser_kwargs(logger,
test_type,
run_info,
config=test_environment.config,
num_test_groups=len(test_groups),
**run_test_kwargs)
executor_cls = product.executor_classes.get(test_type)
executor_kwargs = product.get_executor_kwargs(logger,
test_type,
test_environment,
run_info,
**run_test_kwargs)
if executor_cls is None:
logger.error(f"Unsupported test type {test_type} for product {product.name}")
continue
for test in test_loader.disabled_tests[test_type]:
logger.test_start(test.id)
logger.test_end(test.id, status="SKIP")
test_status.skipped += 1
if test_type == "testharness":
run_tests = {"testharness": []}
for test in test_loader.tests["testharness"]:
if ((test.testdriver and not executor_cls.supports_testdriver) or
(test.jsshell and not executor_cls.supports_jsshell)):
logger.test_start(test.id)
logger.test_end(test.id, status="SKIP")
test_status.skipped += 1
else:
run_tests["testharness"].append(test)
else:
run_tests = test_loader.tests
recording.pause()
with ManagerGroup("web-platform-tests",
run_test_kwargs["processes"],
test_source_cls,
test_source_kwargs,
browser_cls,
browser_kwargs,
executor_cls,
executor_kwargs,
run_test_kwargs["rerun"],
run_test_kwargs["pause_after_test"],
run_test_kwargs["pause_on_unexpected"],
run_test_kwargs["restart_on_unexpected"],
run_test_kwargs["debug_info"],
not run_test_kwargs["no_capture_stdio"],
recording=recording) as manager_group:
try:
manager_group.run(test_type, run_tests)
except KeyboardInterrupt:
logger.critical("Main thread got signal")
manager_group.stop()
raise
test_status.total_tests += manager_group.test_count()
test_status.unexpected += manager_group.unexpected_count()
test_status.unexpected_pass += manager_group.unexpected_pass_count()
return True
def evaluate_runs(test_status, run_test_kwargs):
"""Evaluates the test counts after the given number of repeat runs has finished"""
if test_status.total_tests == 0:
if test_status.skipped > 0:
logger.warning("All requested tests were skipped")
else:
if run_test_kwargs["default_exclude"]:
logger.info("No tests ran")
return True
else:
logger.critical("No tests ran")
return False
if test_status.unexpected and not run_test_kwargs["fail_on_unexpected"]:
logger.info(f"Tolerating {test_status.unexpected} unexpected results")
return True
all_unexpected_passed = (test_status.unexpected and
test_status.unexpected == test_status.unexpected_pass)
if all_unexpected_passed and not run_test_kwargs["fail_on_unexpected_pass"]:
logger.info(f"Tolerating {test_status.unexpected_pass} unexpected results "
"because they all PASS")
return True
return test_status.unexpected == 0
class TestStatus:
"""Class that stores information on the results of test runs for later reference"""
def __init__(self):
self.total_tests = 0
self.skipped = 0
self.unexpected = 0
self.unexpected_pass = 0
self.repeated_runs = 0
self.expected_repeated_runs = 0
self.all_skipped = False
def run_tests(config, test_paths, product, **kwargs):
"""Set up the test environment, load the list of tests to be executed, and
invoke the remainder of the code to execute tests"""
mp = mpcontext.get_context()
if kwargs["instrument_to_file"] is None:
recorder = instruments.NullInstrument()
else:
recorder = instruments.Instrument(kwargs["instrument_to_file"])
with recorder as recording, capture.CaptureIO(logger,
not kwargs["no_capture_stdio"],
mp_context=mp):
recording.set(["startup"])
env.do_delayed_imports(logger, test_paths)
product = products.Product(config, product)
env_extras = product.get_env_extras(**kwargs)
product.check_args(**kwargs)
if kwargs["install_fonts"]:
env_extras.append(FontInstaller(
logger,
font_dir=kwargs["font_dir"],
ahem=os.path.join(test_paths["/"]["tests_path"], "fonts/Ahem.ttf")
))
recording.set(["startup", "load_tests"])
test_groups = (testloader.TestGroupsFile(logger, kwargs["test_groups_file"])
if kwargs["test_groups_file"] else None)
(test_source_cls,
test_source_kwargs,
chunker_kwargs) = testloader.get_test_src(logger=logger,
test_groups=test_groups,
**kwargs)
run_info, test_loader = get_loader(test_paths,
product.name,
run_info_extras=product.run_info_extras(**kwargs),
chunker_kwargs=chunker_kwargs,
test_groups=test_groups,
**kwargs)
logger.info("Using %i client processes" % kwargs["processes"])
test_status = TestStatus()
repeat = kwargs["repeat"]
        test_status.expected_repeated_runs = repeat
if len(test_loader.test_ids) == 0 and kwargs["test_list"]:
logger.critical("Unable to find any tests at the path(s):")
for path in kwargs["test_list"]:
logger.critical(" %s" % path)
logger.critical("Please check spelling and make sure there are tests in the specified path(s).")
return False, test_status
kwargs["pause_after_test"] = get_pause_after_test(test_loader, **kwargs)
ssl_config = {"type": kwargs["ssl_type"],
"openssl": {"openssl_binary": kwargs["openssl_binary"]},
"pregenerated": {"host_key_path": kwargs["host_key_path"],
"host_cert_path": kwargs["host_cert_path"],
"ca_cert_path": kwargs["ca_cert_path"]}}
testharness_timeout_multipler = product.get_timeout_multiplier("testharness",
run_info,
**kwargs)
mojojs_path = kwargs["mojojs_path"] if kwargs["enable_mojojs"] else None
recording.set(["startup", "start_environment"])
with env.TestEnvironment(test_paths,
testharness_timeout_multipler,
kwargs["pause_after_test"],
kwargs["debug_test"],
kwargs["debug_info"],
product.env_options,
ssl_config,
env_extras,
kwargs["enable_webtransport_h3"],
mojojs_path) as test_environment:
recording.set(["startup", "ensure_environment"])
try:
test_environment.ensure_started()
start_time = datetime.now()
except env.TestEnvironmentError as e:
logger.critical("Error starting test environment: %s" % e)
raise
recording.set(["startup"])
max_time = None
if "repeat_max_time" in kwargs:
max_time = timedelta(minutes=kwargs["repeat_max_time"])
repeat_until_unexpected = kwargs["repeat_until_unexpected"]
# keep track of longest time taken to complete a test suite iteration
# so that the runs can be stopped to avoid a possible TC timeout.
longest_iteration_time = timedelta()
while test_status.repeated_runs < repeat or repeat_until_unexpected:
# if the next repeat run could cause the TC timeout to be reached,
# stop now and use the test results we have.
# Pad the total time by 10% to ensure ample time for the next iteration(s).
estimate = (datetime.now() +
timedelta(seconds=(longest_iteration_time.total_seconds() * 1.1)))
if not repeat_until_unexpected and max_time and estimate >= start_time + max_time:
logger.info(f"Ran {test_status.repeated_runs} of {repeat} iterations.")
break
# begin tracking runtime of the test suite
iteration_start = datetime.now()
test_status.repeated_runs += 1
if repeat_until_unexpected:
logger.info(f"Repetition {test_status.repeated_runs}")
elif repeat > 1:
logger.info(f"Repetition {test_status.repeated_runs} / {repeat}")
iter_success = run_test_iteration(test_status, test_loader, test_source_kwargs,
test_source_cls, run_info, recording,
test_environment, product, kwargs)
# if there were issues with the suite run(tests not loaded, etc.) return
if not iter_success:
return False, test_status
recording.set(["after-end"])
logger.info(f"Got {test_status.unexpected} unexpected results, "
f"with {test_status.unexpected_pass} unexpected passes")
logger.suite_end()
# Note this iteration's runtime
iteration_runtime = datetime.now() - iteration_start
# determine the longest test suite runtime seen.
longest_iteration_time = max(longest_iteration_time,
iteration_runtime)
if repeat_until_unexpected and test_status.unexpected > 0:
break
if test_status.repeated_runs == 1 and len(test_loader.test_ids) == test_status.skipped:
test_status.all_skipped = True
break
# Return the evaluation of the runs and the number of repeated iterations that were run.
return evaluate_runs(test_status, kwargs), test_status
def check_stability(**kwargs):
from . import stability
if kwargs["stability"]:
logger.warning("--stability is deprecated; please use --verify instead!")
kwargs['verify_max_time'] = None
kwargs['verify_chaos_mode'] = False
kwargs['verify_repeat_loop'] = 0
kwargs['verify_repeat_restart'] = 10 if kwargs['repeat'] == 1 else kwargs['repeat']
kwargs['verify_output_results'] = True
return stability.check_stability(logger,
max_time=kwargs['verify_max_time'],
chaos_mode=kwargs['verify_chaos_mode'],
repeat_loop=kwargs['verify_repeat_loop'],
repeat_restart=kwargs['verify_repeat_restart'],
output_results=kwargs['verify_output_results'],
**kwargs)
def start(**kwargs):
assert logger is not None
logged_critical = wptlogging.LoggedAboveLevelHandler("CRITICAL")
    handler = handlers.LogLevelFilter(logged_critical, "CRITICAL")
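# ---------------------------------------------------------------------------
# Hedged sketch (not part of wptrunner): how evaluate_runs() interprets a
# finished TestStatus. It assumes setup_logging() has been called so the
# module-level `logger` exists; the counts and kwargs below are illustrative.
def _demo_evaluate_runs():
    status = TestStatus()
    status.total_tests = 120
    status.unexpected = 3
    status.unexpected_pass = 3          # every unexpected result was a PASS
    run_kwargs = {"fail_on_unexpected": True,
                  "fail_on_unexpected_pass": False,
                  "default_exclude": False}
    # All unexpected results passed and fail_on_unexpected_pass is False, so
    # evaluate_runs() tolerates them and reports the run as successful.
    return evaluate_runs(status, run_kwargs)
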
# -*- coding: utf-8 -*-
"""
This module contains hardware code to control a High Finesse wavemeter via the
wlmData.dll library, poll it for the current wavelength in a separate thread,
and expose the readings through the qudi WavemeterInterface.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
from qtpy import QtCore
import ctypes # is a foreign function library for Python. It provides C
# compatible data types, and allows calling functions in DLLs
# or shared libraries. It can be used to wrap these libraries
# in pure Python.
from interface.wavemeter_interface import WavemeterInterface
from core.base import Base
from core.util.mutex import Mutex
class HardwarePull(QtCore.QObject):
""" Helper class for running the hardware communication in a separate thread. """
# signal to deliver the wavelength to the parent class
sig_wavelength = QtCore.Signal(float, float)
def __init__(self, parentclass):
super().__init__()
# remember the reference to the parent class to access functions ad settings
self._parentclass = parentclass
def handle_timer(self, state_change):
""" Threaded method that can be called by a signal from outside to start the timer.
@param bool state: (True) starts timer, (False) stops it.
"""
if state_change:
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self._measure_thread)
self.timer.start(self._parentclass._measurement_timing)
else:
if hasattr(self, 'timer'):
self.timer.stop()
def _measure_thread(self):
""" The threaded method querying the data from the wavemeter.
"""
# update as long as the state is busy
if self._parentclass.getState() == 'running':
# get the current wavelength from the wavemeter
temp1=float(self._parentclass._wavemeterdll.GetWavelength(0))
temp2=float(self._parentclass._wavemeterdll.GetWavelength(0))
# send the data to the parent via a signal
self.sig_wavelength.emit(temp1, temp2)
class HighFinesseWavemeter(Base,WavemeterInterface):
_modclass = 'HighFinesseWavemeter'
_modtype = 'hardware'
## declare connectors
_out = {'highfinessewavemeter': 'WavemeterInterface'}
sig_handle_timer = QtCore.Signal(bool)
#############################################
# Flags for the external DLL
#############################################
# define constants as flags for the wavemeter
_cCtrlStop = ctypes.c_uint16(0x00)
# this following flag is modified to override every existing file
_cCtrlStartMeasurment = ctypes.c_uint16(0x1002)
_cReturnWavelangthAir = ctypes.c_long(0x0001)
_cReturnWavelangthVac = ctypes.c_long(0x0000)
def __init__(self, config, **kwargs):
super().__init__(config=config, **kwargs)
#locking for thread safety
self.threadlock = Mutex()
# the current wavelength read by the wavemeter in nm (vac)
self._current_wavelength=0.0
self._current_wavelength2=0.0
# time between two measurement points of the wavemeter in milliseconds
if 'measurement_timing' in config.keys():
self._measurement_timing=config['measurement_timing']
else:
self._measurement_timing = 10.
self.log.warning('No measurement_timing configured, '\
'using {} instead.'.format(self._measurement_timing))
def on_activate(self, e):
#############################################
# Initialisation to access external DLL
#############################################
try:
            # imports the wavemeter-specific functions from the dll
self._wavemeterdll = ctypes.windll.LoadLibrary('wlmData.dll')
except:
self.log.critical('There is no Wavemeter installed on this '
'Computer.\nPlease install a High Finesse Wavemeter and '
'try again.')
# define the use of the GetWavelength function of the wavemeter
# self._GetWavelength2 = self._wavemeterdll.GetWavelength2
# return data type of the GetWavelength function of the wavemeter
self._wavemeterdll.GetWavelength2.restype = ctypes.c_double
# parameter data type of the GetWavelength function of the wavemeter
self._wavemeterdll.GetWavelength2.argtypes = [ctypes.c_double]
# define the use of the GetWavelength function of the wavemeter
# self._GetWavelength = self._wavemeterdll.GetWavelength
# return data type of the GetWavelength function of the wavemeter
self._wavemeterdll.GetWavelength.restype = ctypes.c_double
# parameter data type of the GetWavelength function of the wavemeter
self._wavemeterdll.GetWavelength.argtypes = [ctypes.c_double]
# define the use of the ConvertUnit function of the wavemeter
# self._ConvertUnit = self._wavemeterdll.ConvertUnit
# return data type of the ConvertUnit function of the wavemeter
self._wavemeterdll.ConvertUnit.restype = ctypes.c_double
# parameter data type of the ConvertUnit function of the wavemeter
self._wavemeterdll.ConvertUnit.argtypes = [ctypes.c_double, ctypes.c_long, ctypes.c_long]
# manipulate perdefined operations with simple flags
# self._Operation = self._wavemeterdll.Operation
# return data type of the Operation function of the wavemeter
self._wavemeterdll.Operation.restype = ctypes.c_long
# parameter data type of the Operation function of the wavemeter
self._wavemeterdll.Operation.argtypes = [ctypes.c_ushort]
# create an indepentent thread for the hardware communication
self.hardware_thread = QtCore.QThread()
# create an object for the hardware communication and let it live on the new thread
self._hardware_pull = HardwarePull(self)
self._hardware_pull.moveToThread(self.hardware_thread)
# connect the signals in and out of the threaded object
self.sig_handle_timer.connect(self._hardware_pull.handle_timer)
self._hardware_pull.sig_wavelength.connect(self.handle_wavelength)
# start the event loop for the hardware
self.hardware_thread.start()
def on_deactivate(self, e):
if self.getState() != 'idle' and self.getState() != 'deactivated':
self.stop_acqusition()
self.hardware_thread.quit()
self.sig_handle_timer.disconnect()
self._hardware_pull.sig_wavelength.disconnect()
try:
# clean up by removing reference to the ctypes library object
del self._wavemeterdll
return 0
except:
self.log.error('Could not unload the wlmData.dll of the '
'wavemeter.')
#############################################
# Methods of the main class
#############################################
def handle_wavelength(self, wavelength1, wavelength2):
""" Function to save the wavelength, when it comes in with a signal.
"""
self._current_wavelength = wavelength1
self._current_wavelength2 = wavelength2
def start_acqusition(self):
""" Method to start the wavemeter software.
@return int: error code (0:OK, -1:error)
Also the actual threaded method for getting the current wavemeter reading is started.
"""
# first check its status
if self.getState() == 'running':
self.log.error('Wavemeter busy')
return -1
self.run()
# actually start the wavemeter
self._wavemeterdll.Operation(self._cCtrlStartMeasurment) #starts measurement
# start the measuring thread
self.sig_handle_timer.emit(True)
return 0
def stop_acqusition(self):
""" Stops the Wavemeter from measuring and kills the thread that queries the data.
@return int: error code (0:OK, -1:error)
"""
# check status just for a sanity check
        if self.getState() == 'idle':
            self.log.warning('Wavemeter was already stopped.')
        else:
            self.sig_handle_timer.emit(False)  # stop the measuring thread
            self.stop()
        # stop the actual wavemeter measurement
        self._wavemeterdll.Operation(self._cCtrlStop)
        return 0
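# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). It assumes a fully
# activated HighFinesseWavemeter instance (wlmData.dll present, on_activate()
# already run by the qudi manager); the helper name and wait time are
# illustrative.
def _demo_read_wavelength(wavemeter, settle_time=1.0):
    """Start the wavemeter, wait briefly, read one wavelength, then stop."""
    import time
    if wavemeter.start_acqusition() != 0:       # refuses to start if already running
        return None
    time.sleep(settle_time)                     # let HardwarePull deliver readings
    wavelength = wavemeter._current_wavelength  # updated by handle_wavelength()
    wavemeter.stop_acqusition()
    return wavelength
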
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_link_status
version_added: "2.4"
short_description: Get interface link status on HUAWEI CloudEngine switches.
description:
- Get interface link status on HUAWEI CloudEngine switches.
author:
- Zhijin Zhou (@QijunPan)
notes:
- Current physical state shows an interface's physical status.
- Current link state shows an interface's link layer protocol status.
- Current IPv4 state shows an interface's IPv4 protocol status.
- Current IPv6 state shows an interface's IPv6 protocol status.
- Inbound octets(bytes) shows the number of bytes that an interface received.
- Inbound unicast(pkts) shows the number of unicast packets that an interface received.
- Inbound multicast(pkts) shows the number of multicast packets that an interface received.
- Inbound broadcast(pkts) shows the number of broadcast packets that an interface received.
- Inbound error(pkts) shows the number of error packets that an interface received.
- Inbound drop(pkts) shows the total number of packets that were sent to the interface but dropped by an interface.
- Inbound rate(byte/sec) shows the rate at which an interface receives bytes within an interval.
- Inbound rate(pkts/sec) shows the rate at which an interface receives packets within an interval.
- Outbound octets(bytes) shows the number of the bytes that an interface sent.
- Outbound unicast(pkts) shows the number of unicast packets that an interface sent.
- Outbound multicast(pkts) shows the number of multicast packets that an interface sent.
- Outbound broadcast(pkts) shows the number of broadcast packets that an interface sent.
- Outbound error(pkts) shows the total number of packets that an interface sent but dropped by the remote interface.
- Outbound drop(pkts) shows the number of dropped packets that an interface sent.
- Outbound rate(byte/sec) shows the rate at which an interface sends bytes within an interval.
- Outbound rate(pkts/sec) shows the rate at which an interface sends packets within an interval.
- Speed shows the rate for an Ethernet interface.
options:
interface:
description:
- For the interface parameter, you can enter C(all) to display information about all interface,
an interface type such as C(40GE) to display information about interfaces of the specified type,
or full name of an interface such as C(40GE1/0/22) or C(vlanif10)
to display information about the specific interface.
required: true
'''
EXAMPLES = '''
- name: Link status test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Get specified interface link status information
ce_link_status:
interface: 40GE1/0/1
provider: "{{ cli }}"
- name: Get specified interface type link status information
ce_link_status:
interface: 40GE
provider: "{{ cli }}"
- name: Get all interface link status information
ce_link_status:
interface: all
provider: "{{ cli }}"
'''
RETURN = '''
result:
description: Interface link status information
returned: always
type: dict
sample: {
"40ge2/0/8": {
"Current IPv4 state": "down",
"Current IPv6 state": "down",
"Current link state": "up",
"Current physical state": "up",
"Inbound broadcast(pkts)": "0",
"Inbound drop(pkts)": "0",
"Inbound error(pkts)": "0",
"Inbound multicast(pkts)": "20151",
"Inbound octets(bytes)": "7314813",
"Inbound rate(byte/sec)": "11",
"Inbound rate(pkts/sec)": "0",
"Inbound unicast(pkts)": "0",
"Outbound broadcast(pkts)": "1",
"Outbound drop(pkts)": "0",
"Outbound error(pkts)": "0",
"Outbound multicast(pkts)": "20152",
"Outbound octets(bytes)": "7235021",
"Outbound rate(byte/sec)": "11",
"Outbound rate(pkts/sec)": "0",
"Outbound unicast(pkts)": "0",
"Speed": "40GE"
}
}
'''
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec, get_nc_config
CE_NC_GET_PORT_SPEED = """
<filter type="subtree">
<devm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ports>
<port>
<position>%s</position>
<ethernetPort>
<speed></speed>
</ethernetPort>
</port>
</ports>
</devm>
</filter>
"""
CE_NC_GET_INT_STATISTICS = """
<filter type="subtree">
<ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<interfaces>
<interface>
<ifName>%s</ifName>
<ifDynamicInfo>
<ifPhyStatus></ifPhyStatus>
<ifLinkStatus></ifLinkStatus>
<ifV4State></ifV4State>
<ifV6State></ifV6State>
</ifDynamicInfo>
<ifStatistics>
<receiveByte></receiveByte>
<sendByte></sendByte>
<rcvUniPacket></rcvUniPacket>
<rcvMutiPacket></rcvMutiPacket>
<rcvBroadPacket></rcvBroadPacket>
<sendUniPacket></sendUniPacket>
<sendMutiPacket></sendMutiPacket>
<sendBroadPacket></sendBroadPacket>
<rcvErrorPacket></rcvErrorPacket>
<rcvDropPacket></rcvDropPacket>
<sendErrorPacket></sendErrorPacket>
<sendDropPacket></sendDropPacket>
</ifStatistics>
<ifClearedStat>
<inByteRate></inByteRate>
<inPacketRate></inPacketRate>
<outByteRate></outByteRate>
<outPacketRate></outPacketRate>
</ifClearedStat>
</interface>
</interfaces>
</ifm>
</filter>
"""
INTERFACE_ALL = 1
INTERFACE_TYPE = 2
INTERFACE_FULL_NAME = 3
def get_interface_type(interface):
"""Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF..."""
if interface is None:
return None
iftype = None
if interface.upper().startswith('GE'):
iftype = 'ge'
elif interface.upper().startswith('10GE'):
iftype = '10ge'
elif interface.upper().startswith('25GE'):
iftype = '25ge'
elif interface.upper().startswith('4X10GE'):
iftype = '4x10ge'
elif interface.upper().startswith('40GE'):
iftype = '40ge'
elif interface.upper().startswith('100GE'):
iftype = '100ge'
elif interface.upper().startswith('VLANIF'):
iftype = 'vlanif'
elif interface.upper().startswith('LOOPBACK'):
iftype = 'loopback'
elif interface.upper().startswith('METH'):
iftype = 'meth'
elif interface.upper().startswith('ETH-TRUNK'):
iftype = 'eth-trunk'
elif interface.upper().startswith('VBDIF'):
iftype = 'vbdif'
elif interface.upper().startswith('NVE'):
iftype = 'nve'
elif interface.upper().startswith('TUNNEL'):
iftype = 'tunnel'
elif interface.upper().startswith('ETHERNET'):
iftype = 'ethernet'
elif interface.upper().startswith('FCOE-PORT'):
iftype = 'fcoe-port'
elif interface.upper().startswith('FABRIC-PORT'):
iftype = 'fabric-port'
elif interface.upper().startswith('STACK-PORT'):
iftype = 'stack-Port'
elif interface.upper().startswith('NULL'):
iftype = 'null'
else:
return None
return iftype.lower()
def is_ethernet_port(interface):
"""Judge whether it is ethernet port"""
ethernet_port = ['ge', '10ge', '25ge', '4x10ge', '40ge', '100ge', 'meth']
if_type = get_interface_type(interface)
if if_type in ethernet_port:
return True
return False
class LinkStatus(object):
"""Get interface link status information"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
# interface name
self.interface = self.module.params['interface']
self.interface = self.interface.replace(' ', '').lower()
self.param_type = None
self.if_type = None
# state
self.results = dict()
self.result = dict()
def check_params(self):
"""Check all input params"""
if not self.interface:
self.module.fail_json(msg='Error: Interface name cannot be empty.')
if self.interface and self.interface != 'all':
if not self.if_type:
self.module.fail_json(
msg='Error: Interface name of %s is error.' % self.interface)
def init_module(self):
"""Init module object"""
self.module = AnsibleModule(
argument_spec=self.spec, supports_check_mode=True)
def show_result(self):
"""Show result"""
self.results['result'] = self.result
self.module.exit_json(**self.results)
def get_intf_dynamic_info(self, dyn_info, intf_name):
"""Get interface dynamic information"""
if not intf_name:
return
if dyn_info:
for eles in dyn_info:
if eles.tag in ["ifPhyStatus", "ifV4State", "ifV6State", "ifLinkStatus"]:
if eles.tag == "ifPhyStatus":
self.result[intf_name][
'Current physical state'] = eles.text
elif eles.tag == "ifLinkStatus":
self.result[intf_name][
'Current link state'] = eles.text
elif eles.tag == "ifV4State":
self.result[intf_name][
'Current IPv4 state'] = eles.text
elif eles.tag == "ifV6State":
self.result[intf_name][
'Current IPv6 state'] = eles.text
def get_intf_statistics_info(self, stat_info, intf_name):
"""Get interface statistics information"""
if not intf_name:
return
if_type = get_interface_type(intf_name)
if if_type == 'fcoe-port' or if_type == 'nve' or if_type == 'tunnel' or \
if_type == 'vbdif' or if_type == 'vlanif':
return
if stat_info:
for eles in stat_info:
if eles.tag in ["receiveByte", "sendByte", "rcvUniPacket", "rcvMutiPacket", "rcvBroadPacket",
"sendUniPacket", "sendMutiPacket", "sendBroadPacket", "rcvErrorPacket",
"rcvDropPacket", "sendErrorPacket", "sendDropPacket"]:
if eles.tag == "receiveByte":
self.result[intf_name][
'Inbound octets(bytes)'] = eles.text
elif eles.tag == "rcvUniPacket":
self.result[intf_name][
'Inbound unicast(pkts)'] = eles.text
elif eles.tag == "rcvMutiPacket":
self.result[intf_name][
'Inbound multicast(pkts)'] = eles.text
elif eles.tag == "rcvBroadPacket":
self.result[intf_name][
'Inbound broadcast(pkts)'] = eles.text
elif eles.tag == "rcvErrorPacket":
self.result[intf_name][
'Inbound error(pkts)'] = eles.text
elif eles.tag == "rcvDropPacket":
self.result[intf_name][
'Inbound drop(pkts)'] = eles.text
elif eles.tag == "sendByte":
self.result[intf_name][
'Outbound octets(bytes)'] = eles.text
elif eles.tag == "sendUniPacket":
self.result[intf_name][
'Outbound unicast(pkts)'] = eles.text
elif eles.tag == "sendMutiPacket":
self.result[intf_name][
'Outbound multicast(pkts)'] = eles.text
elif eles.tag == "sendBroadPacket":
self.result[intf_name][
'Outbound broadcast(pkts)'] = eles.text
elif eles.tag == "sendErrorPacket":
self.result[intf_name][
'Outbound error(pkts)'] = eles.text
elif eles.tag == "sendDropPacket":
self.result[intf_name][
'Outbound drop(pkts)'] = eles.text
def get_intf_cleared_stat(self, clr_stat, intf_name):
"""Get interface cleared state information"""
if not intf_name:
return
if_type = get_interface_type(intf_name)
if if_type == 'fcoe-port' or if_type == 'nve' or if_type == 'tunnel' or \
if_type == 'vbdif' or if_type == 'vlanif':
return
if clr_stat:
for eles in clr_stat:
if eles.tag in ["inByteRate", "inPacketRate", "outByteRate", "outPacketRate"]:
if eles.tag == "inByteRate":
self.result[intf_name][
'Inbound rate(byte/sec)'] = eles.text
elif eles.tag == "inPacketRate":
self.result[intf_name][
'Inbound rate(pkts/sec)'] = eles.text
elif eles.tag == "outByteRate":
self.result[intf_name][
'Outbound rate(byte/sec)'] = eles.text
elif eles.tag == "outPacketRate":
self.result[intf_name][
'Outbound rate(pkts/sec)'] = eles.text
def get_all_interface_info(self, intf_type=None):
"""Get interface information all or by interface type"""
xml_str = CE_NC_GET_INT_STATISTICS % ''
con_obj = get_nc_config(self.module, xml_str)
if "<data/>" in con_obj:
return
xml_str = con_obj.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
# get link status information
root = ElementTree.fromstring(xml_str)
intfs_info = root.find("data/ifm/interfaces")
if not intfs_info:
return
intf_name = ''
flag = False
for eles in intfs_info:
if eles.tag == "interface":
for ele in eles:
if ele.tag in ["ifName", "ifDynamicInfo", "ifStatistics", "ifClearedStat"]:
if ele.tag == "ifName":
intf_name = ele.text.lower()
if intf_type:
if get_interface_type(intf_name) != intf_type.lower():
break
else:
flag = True
self.init_interface_data(intf_name)
if is_ethernet_port(intf_name):
self.get_port_info(intf_name)
if ele.tag == "ifDynamicInfo":
self.get_intf_dynamic_info(ele, intf_name)
elif ele.tag == "ifStatistics":
self.get_intf_statistics_info(ele, intf_name)
elif ele.tag == "ifClearedStat":
self.get_intf_cleared_stat(ele, intf_name)
if intf_type and not flag:
self.module.fail_json(
msg='Error: %s interface type does not exist.' % intf_type.upper())
def get_interface_info(self):
"""Get interface information"""
xml_str = CE_NC_GET_INT_STATISTICS % self.interface.upper()
con_obj = get_nc_config(self.module, xml_str)
if "<data/>" in con_obj:
self.module.fail_json(
msg='Error: %s interface does not exist.' % self.interface.upper())
return
xml_str = con_obj.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
# get link status information
root = ElementTree.fromstring(xml_str)
intf_info = root.find("data/ifm/interfaces/interface")
if intf_info:
for eles in intf_info:
if eles.tag in ["ifDynamicInfo", "ifStatistics", "ifClearedStat"]:
if eles.tag == "ifDynamicInfo":
self.get_intf_dynamic_info(eles, self.interface)
elif eles.tag == "ifStatistics":
self.get_intf_statistics_info(eles, self.interface)
elif eles.tag == "ifClearedStat":
self.get_intf_cleared_stat(eles, self.interface)
def init_interface_data(self, intf_name):
"""Init interface data"""
# init link status data
self.result[intf_name] = dict()
self.result[intf_name]['Current physical state'] = 'down'
self.result[intf_name]['Current link state'] = 'down'
self.result[intf_name]['Current IPv4 state'] = 'down'
self.result[intf_name]['Current IPv6 state'] = 'down'
self.result[intf_name]['Inbound octets(bytes)'] = '--'
self.result[intf_name]['Inbound unicast(pkts)'] = '--'
self.result[intf_name]['Inbound multicast(pkts)'] = '--'
self.result[intf_name]['Inbound broadcast(pkts)'] = '--'
self.result[intf_name]['Inbound error(pkts)'] = '--'
self.result[intf_name]['Inbound drop(pkts)'] = '--'
self.result[intf_name]['Inbound rate(byte/sec)'] = '--'
self.result[intf_name]['Inbound rate(pkts/sec)'] = '--'
self.result[intf_name]['Outbound octets(bytes)'] = '--'
self.result[intf_name]['Outbound unicast(pkts)'] = '--'
self.result[intf_name]['Outbound multicast(pkts)'] = '--'
self.result[intf_name]['Outbound broadcast(pkts)'] = '--'
self.result[intf_name]['Outbound error(pkts)'] = '--'
self.result[intf_name]['Outbound drop(pkts)'] = '--'
self.result[intf_name]['Outbound rate(byte/sec)'] = '--'
self.result[intf_name]['Outbound rate(pkts/sec)'] = '--'
self.result[intf_name]['Speed'] = '--'
def get_port_info(self, interface):
"""Get port information"""
if_type = get_interface_type(interface)
if if_type == 'meth':
xml_str = CE_NC_GET_PORT_SPEED % interface.lower().replace('meth', 'MEth')
else:
xml_str = CE_NC_GET_PORT_SPEED % interface.upper()
con_obj = get_nc_config(self.module, xml_str)
if "<data/>" in con_obj:
return
xml_str = con_obj.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
# get link status information
root = ElementTree.fromstring(xml_str)
port_info = root.find("data/devm/ports/port")
if port_info:
for eles in port_info:
if eles.tag == "ethernetPort":
for ele in eles:
if ele.tag == 'speed':
self.result[interface]['Speed'] = ele.text
def get_link_status(self):
"""Get link status information"""
if self.param_type == INTERFACE_FULL_NAME:
self.init_interface_data(self.interface)
self.get_interface_info()
if is_ethernet_port(self.interface):
self.get_port_info(self.interface)
elif self.param_type == INTERFACE_TYPE:
self.get_all_interface_info(self.interface)
else:
self.get_all_interface_info()
def get_intf_param_type(self):
"""Get the type of input interface parameter"""
        if self.interface == 'all':
"""
Convert human-editable CSV files into JSON files, used by the web application.
"""
import json
import csv
from io import StringIO
from datetime import datetime
################################################################################
# CONFIG
# behavior categories to include in the JSON file
categories = set(('G', 'M', 'W', 'C', 'F', 'H', 'I', 'P', 'V',)) #'A', 'L', 'O', 'E', 'S'
# time of first GPS point
firstGPStime = datetime(2014,1,24,5,36,14)
# seconds between each GPS point
intervalseconds = 60
class InFileNames:
observations = 'behavior observation codes.csv'
translations = 'behavior code translations.csv'
mediafeatures = 'media features.json'
gpstrack = 'GPS track.csv'
pictures = 'pictures.csv'
textbubbles = 'text bubbles.csv'
videos = 'videos.csv'
class OutFileNames:
behavior = 'behavior.json' # observations + translations
behaviorcsv = 'behavior observation data.csv'
media = 'media.js' # pictures, videos, text, media features
tourIntro = {
'loc': [10.5142232962, -85.3693762701],
'note': 'intro',
'data': [],
'time': '05:30:00',
}
tourStart = {
'loc': [10.5142232962, -85.3693762701],
'note': 'start',
'data': [],
'time': '05:30:00',
}
tourEnd = {
'loc': [10.5143646989, -85.3639992792], #[10.5148555432, -85.3643822484],
'note': 'end',
'data': [],
'time': '18:10:43',
}
# monkey patch json encoder to format floats
from json import encoder
encoder.FLOAT_REPR = lambda o: format(o, '.5f')
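# Note (added for clarity): this hook is honoured by the pure-Python json
# encoder (Python 2), so e.g. json.dumps(10.5142232962) emits "10.51422".
# Newer Python 3 releases ignore FLOAT_REPR, in which case coordinates would
# need to be rounded explicitly before dumping.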
################################################################################
# GPS track
with open(InFileNames.gpstrack) as f:
reader = csv.reader(f, skipinitialspace=True)
GPStrack = [(float(lat[:9]), float(lon[:10])) for (lat,lon) in list(reader)[1:]]
def parsetime(timestr):
"""
Get the time from a string, ignore the date.
(Return a datetime with the date of the first GPS point.)
"""
# take out the date (get only the last space-separated part)
timestr = timestr.split()[-1]
time = datetime.strptime(timestr, '%H:%M:%S').time()
return datetime.combine(firstGPStime.date(), time)
def getTimeInterval(time):
"""
Get start and end points on the GPS track, of the time interval containing "time".
"""
index = int((time - firstGPStime).total_seconds() / intervalseconds)
interval = GPStrack[index:index+2]
if len(interval) == 2:
return interval
# if the time is past the last GPS point, return an interval with just the last GPS point
else:
return (GPStrack[-1], GPStrack[-1])
def getGPSCoords(time):
"""
Get a geographical point along Winslow Homer's GPS track, by linear interpolation
"""
# get start and stop
start, stop = getTimeInterval(time)
timediff = (time - firstGPStime).total_seconds()
proportion = (timediff % intervalseconds) / float(intervalseconds)
latdelta = (stop[0] - start[0])
lat = (proportion * latdelta) + start[0]
londelta = (stop[1] - start[1])
lon = (proportion * londelta) + start[1]
return (lat, lon)
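# Illustrative sketch (not part of the original script): how the helpers above
# combine for a hypothetical observation 90 seconds after firstGPStime.
# With intervalseconds == 60, the point lies halfway between GPStrack[1] and
# GPStrack[2]:
#
#   index = int(90 / 60)              # -> 1, so start, stop = GPStrack[1:3]
#   proportion = (90 % 60) / 60.0     # -> 0.5
#   lat = 0.5 * (stop[0] - start[0]) + start[0]
#   lon = 0.5 * (stop[1] - start[1]) + start[1]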
def loadTranslationsFile():
"""
Load the translations file, return a list of dicts with the fields in the file
"""
with open(InFileNames.translations) as f:
reader = csv.DictReader(f, skipinitialspace=True)
return list(reader)
def loadObservationFile(translations=None):
"""
Load the observations file, return a list with a dict for each observation
record, and a set with all of the unique behavior codes.
"""
# ordered list of observations in file
observations = []
# set of codes we've seen
codes = set()
with open(InFileNames.observations) as f:
reader = csv.DictReader(f, skipinitialspace=True)
for line in reader:
# look up GPS coordinates from timestamp
line['loc'] = getGPSCoords(parsetime(line['timestamp']))
# add a 'time' field without the date, to display to user
line['time'] = line['timestamp'].split()[1]
observations.append(line)
codes.add(line['code'])
return observations, codes
def filterObservationsTranslations():
"""
Return (observations, translations) list containing the intersection
(inner join) of the observations and translations, and only in the
configured categories.
"""
translations = loadTranslationsFile()
observations, obs_code_set = loadObservationFile()
# Find codes that occur in the observations, and are in the right categories.
# Make a {code : translation-fields} 2-dimensional dict.
translations_dict = {
t['code'] : t
for t in translations
if (t['code'] in obs_code_set) and (t['category'].upper() in categories) }
# Find observations that have a translation.
observations = list(filter(lambda o: o['code'] in translations_dict, observations))
return observations, translations_dict
def writeBehaviorJSON(observations, translations_dict, tourlist):
"""
Write behavior JSON file, with observations and translations joined.
"""
#observations, translations_dict = filterObservationsTranslations()
# join together observations with translations
    behavior_list = [ checkOnTour(tourlist, o,
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007 Johan Gonqvist <johan.gronqvist@gmail.com>
# Copyright (C) 2007-2009 Gary Burton <gary.burton@zen.co.uk>
# Copyright (C) 2007-2009 Stephane Charette <stephanecharette@gmail.com>
# Copyright (C) 2008-2009 Brian G. Matherly
# Copyright (C) 2008 Jason M. Simanek <jason@bohemianalps.com>
# Copyright (C) 2008-2011 Rob G. Healey <robhealey1@gmail.com>
# Copyright (C) 2010 Doug Blank <doug.blank@gmail.com>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010-2017 Serge Noiraud
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2013 Benny Malengier
# Copyright (C) 2016 Allen Crider
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Narrative Web Page generator.
Class:
StatisticsPage
"""
#------------------------------------------------
# python modules
#------------------------------------------------
from decimal import getcontext
import logging
#------------------------------------------------
# Gramps module
#------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
from gramps.gen.lib import (Person, Family, Event, Place, Source,
Citation, Repository)
from gramps.gen.plug.report import Bibliography
from gramps.gen.utils.file import media_path_full
from gramps.plugins.lib.libhtml import Html
#------------------------------------------------
# specific narrative web import
#------------------------------------------------
from gramps.plugins.webreport.basepage import BasePage
from gramps.plugins.webreport.common import FULLCLEAR
LOG = logging.getLogger(".NarrativeWeb")
getcontext().prec = 8
_ = glocale.translation.sgettext
class StatisticsPage(BasePage):
"""
Create one page for statistics
"""
def __init__(self, report, title, step):
"""
@param: report -- The instance of the main report class
for this report
@param: title -- Is the title of the web page
"""
import posixpath
BasePage.__init__(self, report, title)
self.bibli = Bibliography()
self.uplink = False
self.report = report
# set the file name and open file
output_file, sio = self.report.create_file("statistics")
addressbookpage, head, body = self.write_header(_("Statistics"))
(males,
females,
unknown) = self.get_gender(report.database.iter_person_handles())
step()
mobjects = report.database.get_number_of_media()
npersons = report.database.get_number_of_people()
nfamilies = report.database.get_number_of_families()
nsurnames = len(set(report.database.surname_list))
notfound = []
total_media = 0
mbytes = "0"
chars = 0
for media in report.database.iter_media():
total_media += 1
fullname = media_path_full(report.database, media.get_path())
try:
chars += posixpath.getsize(fullname)
length = len(str(chars))
if chars <= 999999:
mbytes = _("less than 1")
else:
mbytes = str(chars)[:(length-6)]
except OSError:
notfound.append(media.get_path())
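        # Clarifying note (added): the total size is reported in whole megabytes
        # by dropping the last six digits of the byte count, e.g. chars = 12345678
        # (8 digits) gives str(chars)[:8 - 6] == '12' MB; totals under 1,000,000
        # bytes are shown as "less than 1".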
with Html("div", class_="content", id='EventDetail') as section:
section += Html("h3", self._("Database overview"), inline=True)
body += section
with Html("div", class_="content", id='subsection narrative') as sec11:
sec11 += Html("h4", self._("Individuals"), inline=True)
body += sec11
with Html("div", class_="content", id='subsection narrative') as sec1:
sec1 += Html("br", self._("Number of individuals") + self.colon +
"%d" % npersons, inline=True)
sec1 += Html("br", self._("Males") + self.colon +
"%d" % males, inline=True)
sec1 += Html("br", self._("Females") + self.colon +
"%d" % females, inline=True)
sec1 += Html("br", self._("Individuals with unknown gender") +
self.colon + "%d" % unknown, inline=True)
body += sec1
with Html("div", class_="content", id='subsection narrative') as sec2:
sec2 += Html("h4", self._("Family Information"), inline=True)
sec2 += Html("br", self._("Number of families") + self.colon +
"%d" % nfamilies, inline=True)
sec2 += Html("br", self._("Unique surnames") + self.colon +
"%d" % nsurnames, inline=True)
body += sec2
with Html("div", class_="content", id='subsection narrative') as sec3:
sec3 += Html("h4", self._("Media Objects"), inline=True)
sec3 += Html("br",
self._("Total number of media object references") +
self.colon + "%d" % total_media, inline=True)
sec3 += Html("br", self._("Number of unique media objects") +
self.colon + "%d" % mobjects, inline=True)
sec3 += Html("br", self._("Total size of media objects") +
self.colon +
"%8s %s" % (mbytes, self._("Megabyte|MB")),
inline=True)
sec3 += Html("br", self._("Missing Media Objects") +
self.colon + "%d" % len(notfound), inline=True)
body += sec3
with Html("div", class_="content", id='subsection narrative') as sec4:
sec4 += Html("h4", self._("Miscellaneous"), inline=True)
sec4 += Html("br", self._("Number of events") + self.colon +
"%d" % report.database.get_number_of_events(),
inline=True)
sec4 += Html("br", self._("Number of places") + self.colon +
"%d" % report.database.get_number_of_places(),
inline=True)
nsources = report.database.get_number_of_sources()
sec4 += Html("br", self._("Number of sources") +
self.colon + "%d" % nsources,
inline=True)
ncitations = report.database.get_number_of_citations()
sec4 += Html("br", self._("Number of citations") +
self.colon + "%d" % ncitations,
inline=True)
nrepo = report.database.get_number_of_repositories()
sec4 += Html("br", self._("Number of repositories") +
self.colon + "%d" % nrepo,
inline=True)
body += sec4
(males,
females,
unknown) = self.get_gender(self.report.bkref_dict[Person].keys())
origin = " :<br/>" + report.filter.get_name(self.rlocale)
with Html("div", class_="content", id='EventDetail') as section:
section += Html("h3",
self._("Narrative web content report for") + origin,
inline=True)
body += section
with Html("div", class_="content", id='subsection narrative') as sec5:
sec5 += Html("h4", self._("Individuals"), inline=True)
sec5 += Html("br", self._("Number of individuals") + self.colon +
"%d" % len(self.report.bkref_dict[Person]),
inline=True)
sec5 += Html("br", self._("Males") + self.colon +
"%d" % males, inline=True)
sec5 += Html("br", self._("Females") + self.colon +
"%d" % females, inline=True)
sec5 += Html("br", self._("Individuals with unknown gender") +
self.colon + "%d" % unknown, inline=True)
body += sec5
with Html("div", class_="content", id='subsection narrative') as sec6:
sec6 += Html("h4", self._("Family Information"), inline=True)
sec6 += Html("br", self._("Number of families") + self.colon +
"%d" % len(self.report.bkref_dict[Family]),
inline=True)
body += sec6
with Html("div", class_="content", id='subsection narrative') as sec7:
sec7 += Html("h4", self._("Miscellaneous"), inline=True)
sec7 += Html("br", self._("Number of events") + self.colon +
"%d" % len(self.report.bkref_dict[Event]),
inline=True)
sec7 += Html("br", self._("Number of places") + self.colon +
"%d" % len(self.report.bkref_dict[Place]),
inline=True)
sec7 += Html("br", self._("Number of sources") + self.colon +
"%d" % len(self.report.bkref_dict[Source]),
inline=True)
sec7 += Html("br", self._("Number of citations") + self.colon +
"%d" % len(self.report.bkref_dict[Citation]),
inline=True)
sec7 += Html("br", self._("Number of repositories") + self.colon +
"%d" % len(self.report.bkref_dict[Repository]),
inline=True)
body += sec7
# add fullclear for proper styling
# and footer section to page
        footer = self.write_footer(None)
#!/usr/bin/env python
'''
Fly Helicopter in SITL
AP_FLAKE8_CLEAN
'''
from __future__ import print_function
from arducopter import AutoTestCopter
from common import AutoTest
from common import NotAchievedException, AutoTestTimeoutException
from pymavlink import mavutil
from pysim import vehicleinfo
class AutoTestHelicopter(AutoTestCopter):
sitl_start_loc = mavutil.location(40.072842, -105.230575, 1586, 0) # Sparkfun AVC Location
def vehicleinfo_key(self):
return 'Helicopter'
def log_name(self):
return "HeliCopter"
def default_frame(self):
return "heli"
def sitl_start_location(self):
return self.sitl_start_loc
def default_speedup(self):
'''Heli seems to be race-free'''
return 100
def is_heli(self):
return True
def rc_defaults(self):
ret = super(AutoTestHelicopter, self).rc_defaults()
ret[8] = 1000
ret[3] = 1000 # collective
return ret
@staticmethod
def get_position_armable_modes_list():
'''filter THROW mode out of armable modes list; Heli is special-cased'''
ret = AutoTestCopter.get_position_armable_modes_list()
ret = filter(lambda x : x != "THROW", ret)
return ret
def loiter_requires_position(self):
self.progress("Skipping loiter-requires-position for heli; rotor runup issues")
def get_collective_out(self):
servo = self.mav.recv_match(type='SERVO_OUTPUT_RAW', blocking=True)
chan_pwm = (servo.servo1_raw + servo.servo2_raw + servo.servo3_raw)/3.0
return chan_pwm
def rotor_runup_complete_checks(self):
# Takeoff and landing in Loiter
TARGET_RUNUP_TIME = 10
self.zero_throttle()
self.change_mode('LOITER')
self.wait_ready_to_arm()
self.arm_vehicle()
servo = self.mav.recv_match(type='SERVO_OUTPUT_RAW', blocking=True)
coll = servo.servo1_raw
coll = coll + 50
self.set_parameter("H_RSC_RUNUP_TIME", TARGET_RUNUP_TIME)
self.progress("Initiate Runup by putting some throttle")
self.set_rc(8, 2000)
self.set_rc(3, 1700)
self.progress("Collective threshold PWM %u" % coll)
tstart = self.get_sim_time()
self.progress("Wait that collective PWM pass threshold value")
servo = self.mav.recv_match(condition='SERVO_OUTPUT_RAW.servo1_raw>%u' % coll, blocking=True)
runup_time = self.get_sim_time() - tstart
self.progress("Collective is now at PWM %u" % servo.servo1_raw)
self.mav.wait_heartbeat()
if runup_time < TARGET_RUNUP_TIME:
self.zero_throttle()
self.set_rc(8, 1000)
self.disarm_vehicle()
self.mav.wait_heartbeat()
raise NotAchievedException("Takeoff initiated before runup time complete %u" % runup_time)
self.progress("Runup time %u" % runup_time)
self.zero_throttle()
self.set_rc(8, 1000)
self.land_and_disarm()
self.mav.wait_heartbeat()
# fly_avc_test - fly AVC mission
def fly_avc_test(self):
# Arm
self.change_mode('STABILIZE')
self.wait_ready_to_arm()
self.arm_vehicle()
self.progress("Raising rotor speed")
self.set_rc(8, 2000)
# upload mission from file
self.progress("# Load copter_AVC2013_mission")
# load the waypoint count
num_wp = self.load_mission("copter_AVC2013_mission.txt", strict=False)
if not num_wp:
raise NotAchievedException("load copter_AVC2013_mission failed")
self.progress("Fly AVC mission from 1 to %u" % num_wp)
self.set_current_waypoint(1)
# wait for motor runup
self.delay_sim_time(20)
# switch into AUTO mode and raise throttle
self.change_mode('AUTO')
self.set_rc(3, 1500)
# fly the mission
self.wait_waypoint(0, num_wp-1, timeout=500)
# set throttle to minimum
self.zero_throttle()
# wait for disarm
self.wait_disarmed()
self.progress("MOTORS DISARMED OK")
self.progress("Lowering rotor speed")
self.set_rc(8, 1000)
self.progress("AVC mission completed: passed!")
def takeoff(self,
alt_min=30,
takeoff_throttle=1700,
require_absolute=True,
mode="STABILIZE",
timeout=120):
"""Takeoff get to 30m altitude."""
self.progress("TAKEOFF")
self.change_mode(mode)
if not self.armed():
self.wait_ready_to_arm(require_absolute=require_absolute, timeout=timeout)
self.zero_throttle()
self.arm_vehicle()
self.progress("Raising rotor speed")
self.set_rc(8, 2000)
self.progress("wait for rotor runup to complete")
self.wait_servo_channel_value(8, 1660, timeout=10)
if mode == 'GUIDED':
self.user_takeoff(alt_min=alt_min)
else:
self.set_rc(3, takeoff_throttle)
self.wait_for_alt(alt_min=alt_min, timeout=timeout)
self.hover()
self.progress("TAKEOFF COMPLETE")
def fly_each_frame(self):
vinfo = vehicleinfo.VehicleInfo()
vinfo_options = vinfo.options[self.vehicleinfo_key()]
known_broken_frames = {
}
for frame in sorted(vinfo_options["frames"].keys()):
self.start_subtest("Testing frame (%s)" % str(frame))
if frame in known_broken_frames:
self.progress("Actually, no I'm not - it is known-broken (%s)" %
(known_broken_frames[frame]))
continue
frame_bits = vinfo_options["frames"][frame]
print("frame_bits: %s" % str(frame_bits))
if frame_bits.get("external", False):
self.progress("Actually, no I'm not - it is an external simulation")
continue
model = frame_bits.get("model", frame)
# the model string for Callisto has crap in it.... we
# should really have another entry in the vehicleinfo data
# to carry the path to the JSON.
actual_model = model.split(":")[0]
defaults = self.model_defaults_filepath(actual_model)
if type(defaults) != list:
defaults = [defaults]
self.customise_SITL_commandline(
["--defaults", ','.join(defaults), ],
model=model,
wipe=True,
)
self.takeoff(10)
self.do_RTL()
self.set_rc(8, 1000)
def hover(self):
self.progress("Setting hover collective")
self.set_rc(3, 1500)
def fly_heli_poshold_takeoff(self):
"""ensure vehicle stays put until it is ready to fly"""
self.context_push()
ex = None
try:
self.set_parameter("PILOT_TKOFF_ALT", 700)
self.change_mode('POSHOLD')
self.zero_throttle()
self.set_rc(8, 1000)
self.wait_ready_to_arm()
# Arm
self.arm_vehicle()
self.progress("Raising rotor speed")
self.set_rc(8, 2000)
self.progress("wait for rotor runup to complete")
self.wait_servo_channel_value(8, 1660, timeout=10)
self.delay_sim_time(20)
# check we are still on the ground...
m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
max_relalt_mm = 1000
            if abs(m.relative_alt) > max_relalt_mm:
import json
from django.core.management import call_command
from django.test import TestCase
from bulk_adding.models import RawPeople
from candidates.tests.uk_examples import UK2015ExamplesMixin
from official_documents.models import OfficialDocument
from parties.tests.factories import PartyFactory
from parties.tests.fixtures import DefaultPartyFixtures
from sopn_parsing.models import ParsedSOPN
from sopn_parsing.helpers import parse_tables
from ynr.apps.sopn_parsing.management.commands.sopn_parsing_parse_tables import (
Command as ParseTablesCommand,
)
from unittest import skipIf
from pandas import Index, Series
from sopn_parsing.tests import should_skip_pdf_tests
class TestSOPNHelpers(DefaultPartyFixtures, UK2015ExamplesMixin, TestCase):
def setUp(self):
PartyFactory(ec_id="PP85", name="UK Independence Party (UKIP)")
@skipIf(should_skip_pdf_tests(), "Required PDF libs not installed")
def test_basic_parsing(self):
self.assertFalse(RawPeople.objects.exists())
doc = OfficialDocument.objects.create(
ballot=self.dulwich_post_ballot,
document_type=OfficialDocument.NOMINATION_PAPER,
source_url="example.com",
relevant_pages="all",
)
dataframe = json.dumps(
{
"0": {
"0": "Name of \nCandidate",
"1": "BRADBURY \nAndrew John",
"2": "COLLINS \nDave",
"3": "HARVEY \nPeter John",
"4": "JENNER \nMelanie",
},
"1": {
"0": "Home Address",
"1": "10 Fowey Close, \nShoreham by Sea, \nWest Sussex, \nBN43 5HE",
"2": "51 Old Fort Road, \nShoreham by Sea, \nBN43 5RL",
"3": "76 Harbour Way, \nShoreham by Sea, \nSussex, \nBN43 5HH",
"4": "9 Flag Square, \nShoreham by Sea, \nWest Sussex, \nBN43 5RZ",
},
"2": {
"0": "Description (if \nany)",
"1": "Green Party",
"2": "Independent",
"3": "UK Independence \nParty (UKIP)",
"4": "Labour Party",
},
"3": {
"0": "Name of \nProposer",
"1": "Tiffin Susan J",
"2": "Loader Jocelyn C",
"3": "Hearne James H",
"4": "O`Connor Lavinia",
},
"4": {
"0": "Reason \nwhy no \nlonger \nnominated\n*",
"1": "",
"2": "",
"3": "",
"4": "",
},
}
)
ParsedSOPN.objects.create(
sopn=doc, raw_data=dataframe, status="unparsed"
)
call_command("sopn_parsing_parse_tables")
self.assertEqual(RawPeople.objects.count(), 1)
raw_people = RawPeople.objects.get()
self.assertEqual(
raw_people.data,
[
{"name": "Andrew John Bradbury", "party_id": "PP63"},
{"name": "Dave Collins", "party_id": "ynmp-party:2"},
{"name": "Peter John Harvey", "party_id": "PP85"},
{"name": "Melanie Jenner", "party_id": "PP53"},
],
)
class TestParseTablesUnitTests(TestCase):
def get_two_name_field_cases(self):
# this could be updated with more combinations as we come across them
return [
{
"name_fields": ["candidate surname", "candidate forename"],
"row": {
"candidate surname": "BAGSHAW",
"candidate forename": "Elaine Sheila",
"home address": "1 Foo Street \n London \nE14 6FW",
"description": "London Liberal \nDemocrats",
"reason why no longer nominated": "",
},
"ordered_name_fields": [
"candidate forename",
"candidate surname",
],
"expected_name": "Elaine Sheila Bagshaw",
},
{
"name_fields": ["surname", "other names"],
"row": {
"surname": "BAGSHAW",
"other names": "Elaine Sheila",
"home address": "1 Foo Street \nLondon \nE14 6FW",
"description": "London Liberal \nDemocrats",
"reason why no longer nominated": "",
},
"ordered_name_fields": ["other names", "surname"],
"expected_name": "Elaine Sheila Bagshaw",
},
{
"name_fields": ["last name", "other names"],
"row": {
"last name": "BAGSHAW",
"other names": "Elaine Sheila",
"home address": "1 Foo Street \nLondon \nE14 6FW",
"description": "London Liberal \nDemocrats",
"reason why no longer nominated": "",
},
"ordered_name_fields": ["other names", "last name"],
"expected_name": "Elaine Sheila Bagshaw",
},
{
"name_fields": ["candidate forename", "candidate surname"],
"row": {
"candidate forename": "Elaine Sheila",
"candidate surname": "BAGSHAW",
"home address": "1 Foo Street \n London \nE14 6FW",
"description": "London Liberal \nDemocrats",
"reason why no longer nominated": "",
},
"ordered_name_fields": [
"candidate forename",
"candidate surname",
],
"expected_name": "Elaine Sheila Bagshaw",
},
]
def get_single_name_field_cases(self):
return [
{
"name_fields": ["name of candidate"],
"row": {
"name of candidate": "BAGSHAW Elaine Sheila",
"home address": "1 Foo Street \n London \nE14 6FW",
"description": "London Liberal \nDemocrats",
"reason why no longer nominated": "",
},
},
{
"name_fields": ["names of candidate"],
"row": {
"names of candidate": "BAGSHAW Elaine Sheila",
"home address": "1 Foo Street \nLondon \nE14 6FW",
"description": "London Liberal \nDemocrats",
"reason why no longer nominated": "",
},
},
{
"name_fields": ["candidate name"],
"row": {
"candidate name": "BAGSHAW Elaine Sheila",
"home address": "1 Foo Street \nLondon \nE14 6FW",
"description": "London Liberal \nDemocrats",
"reason why no longer nominated": "",
},
},
{
"name_fields": ["surname"],
"row": {
"surname": "BAGSHAW Elaine Sheila",
"home address": "1 Foo Street \nLondon \nE14 6FW",
"description": "London Liberal \nDemocrats",
"reason why no longer nominated": "",
},
},
{
"name_fields": ["candidates surname"],
"row": {
"candidates surname": "BAGSHAW Elaine Sheila",
"home address": "1 Foo Street \nLondon \nE14 6FW",
"description": "London Liberal \nDemocrats",
"reason why no longer nominated": "",
},
},
{
"name_fields": ["other name"],
"row": {
"other name": "BAGSHAW Elaine Sheila",
"home address": "1 Foo Street \nLondon \nE14 6FW",
"description": "London Liberal \nDemocrats",
"reason why no longer nominated": "",
},
},
]
def test_get_name_single_field(self):
for case in self.get_single_name_field_cases():
row = Series(case["row"])
name_fields = case["name_fields"]
with self.subTest(name_fields=name_fields):
assert len(case["name_fields"]) == 1
name = parse_tables.get_name(row=row, name_fields=name_fields)
assert name == "Elaine Sheila Bagshaw"
def test_get_name_two_fields(self):
for case in self.get_two_name_field_cases():
row = Series(case["row"])
name_fields = case["name_fields"]
with self.subTest(name_fields=name_fields):
assert len(case["name_fields"]) == 2
name = parse_tables.get_name(row=row, name_fields=name_fields)
assert name == case["expected_name"]
def test_get_name_fields_single(self):
for case in self.get_single_name_field_cases():
row = Index(case["row"])
with self.subTest(row=row):
name_fields = parse_tables.get_name_fields(row=row)
assert len(name_fields) == 1
assert name_fields == case["name_fields"]
def test_get_name_fields_two(self):
for case in self.get_two_name_field_cases():
row = Index(case["row"])
with self.subTest(row=row):
name_fields = parse_tables.get_name_fields(row=row)
assert len(name_fields) == 2
assert name_fields == case["name_fields"]
def test_get_name_fields_raises_error(self):
row = Index({"foo": "Bar"})
with self.assertRaises(ValueError):
parse_tables.get_name_fields(row=row)
def test_order_name_fields(self):
for case in self.get_two_name_field_cases():
name_fields = case["name_fields"]
with self.subTest(name_fields=name_fields):
result = parse_tables.order_name_fields(name_fields)
assert result == case["ordered_name_fields"]
def test_clean_name_replaces_backticks(self):
name = parse_tables.clean_name("D`SOUZA")
assert "`" not in name
assert "'" in name
def test_clean_name_replaces_newlines(self):
name = parse_tables.clean_name(
"A Very Long Name That Splits \nOver Lines"
)
assert "\n" not in name
def test_clean_name_capitalized_last_and_titalized(self):
name = parse_tables.clean_name("SMITH John")
assert name == "John Smith"
def test_clean_last_names(self):
name = parse_tables.clean_last_names(["MACDONALD", "John"])
assert name == "MacDonald"
def test_clean_name_two_word_surnames(self):
names = [
("EDE COOPER \nPalmer", "Palmer Ede Cooper"),
("VAN DULKEN \nRichard Michael", "Richard Michael Van Dulken"),
("ARMSTRONG LILLEY \nLynne", "Lynne Armstrong Lilley"),
(
" D`SOUZA Aaron Anthony Jose \nHasan",
"Aaron Anthony Jose Hasan D'Souza",
),
("Michael James Collins", "Michael James Collins"),
(" Michael James Collins ", "Michael James Collins"),
("DAVE Nitesh Pravin", "Nitesh Pravin Dave"),
("DAVE\nNitesh Pravin", "Nitesh Pravin Dave"),
("COOKE Anne-Marie", "Anne-Marie Cooke"),
("COOKE\nAnne-Marie", "Anne-Marie Cooke"),
("BROOKES-\nDUNCAN\nKaty", "Katy Brookes-Duncan"),
("HOUNSOME\nJohn", "John Hounsome"),
("O`CONNELL \nStephen John", "Stephen John O'Connell"),
| ("O`NEAL \nCarol Joy", "Carol Joy O'Neal"), | 919 | lcc_e | python | null | 471d9cb59a1c21652129c86006d9d0c5592f5e0867def0cb |
|
"""
This module provides an abstraction for working with XModuleDescriptors
that are stored in a database and are accessible using their Location as an identifier.
"""
import logging
import re
import json
import datetime
from uuid import uuid4
from pytz import UTC
from collections import namedtuple, defaultdict
import collections
from contextlib import contextmanager
import functools
import threading
from operator import itemgetter
from sortedcontainers import SortedListWithKey
from abc import ABCMeta, abstractmethod
from contracts import contract, new_contract
from xblock.plugin import default_select
from .exceptions import InvalidLocationError, InsufficientSpecificationError
from xmodule.errortracker import make_error_tracker
from xmodule.assetstore import AssetMetadata
from opaque_keys.edx.keys import CourseKey, UsageKey, AssetKey
from opaque_keys.edx.locations import Location # For import backwards compatibility
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xblock.runtime import Mixologist
from xblock.core import XBlock
log = logging.getLogger('edx.modulestore')
new_contract('CourseKey', CourseKey)
new_contract('AssetKey', AssetKey)
new_contract('AssetMetadata', AssetMetadata)
new_contract('XBlock', XBlock)
LIBRARY_ROOT = 'library.xml'
COURSE_ROOT = 'course.xml'
class ModuleStoreEnum(object):
"""
A class to encapsulate common constants that are used with the various modulestores.
"""
class Type(object):
"""
The various types of modulestores provided
"""
split = 'split'
mongo = 'mongo'
xml = 'xml'
class RevisionOption(object):
"""
Revision constants to use for Module Store operations
Note: These values are passed into store APIs and only used at run time
"""
# both DRAFT and PUBLISHED versions are queried, with preference to DRAFT versions
draft_preferred = 'rev-opt-draft-preferred'
# only DRAFT versions are queried and no PUBLISHED versions
draft_only = 'rev-opt-draft-only'
        # only PUBLISHED versions are queried and no DRAFT versions
published_only = 'rev-opt-published-only'
# all revisions are queried
all = 'rev-opt-all'
class Branch(object):
"""
Branch constants to use for stores, such as Mongo, that have only 2 branches: DRAFT and PUBLISHED
Note: These values are taken from server configuration settings, so should not be changed without alerting DevOps
"""
draft_preferred = 'draft-preferred'
published_only = 'published-only'
class BranchName(object):
"""
Branch constants to use for stores, such as Split, that have named branches
"""
draft = 'draft-branch'
published = 'published-branch'
library = 'library'
class UserID(object):
"""
Values for user ID defaults
"""
# Note: we use negative values here to (try to) not collide
# with user identifiers provided by actual user services.
# user ID to use for all management commands
mgmt_command = -1
# user ID to use for primitive commands
primitive_command = -2
# user ID to use for tests that do not have a django user available
test = -3
class SortOrder(object):
"""
Values for sorting asset metadata.
"""
ascending = 1
descending = 2
class BulkOpsRecord(object):
"""
For handling nesting of bulk operations
"""
def __init__(self):
self._active_count = 0
@property
def active(self):
"""
Return whether this bulk write is active.
"""
return self._active_count > 0
def nest(self):
"""
Record another level of nesting of this bulk write operation
"""
self._active_count += 1
def unnest(self):
"""
Record the completion of a level of nesting of the bulk write operation
"""
self._active_count -= 1
@property
def is_root(self):
"""
Return whether the bulk write is at the root (first) level of nesting
"""
return self._active_count == 1
class ActiveBulkThread(threading.local):
"""
Add the expected vars to the thread.
"""
def __init__(self, bulk_ops_record_type, **kwargs):
super(ActiveBulkThread, self).__init__(**kwargs)
self.records = defaultdict(bulk_ops_record_type)
class BulkOperationsMixin(object):
"""
    This implements the :meth:`bulk_operations` modulestore semantics, handling nested invocations.
    In particular, it implements :meth:`_begin_bulk_operation` and
    :meth:`_end_bulk_operation` to provide the external interface.
Internally, this mixin records the set of all active bulk operations (keyed on the active course),
and only writes those values when :meth:`_end_bulk_operation` is called.
If a bulk write operation isn't active, then the changes are immediately written to the underlying
mongo_connection.
"""
def __init__(self, *args, **kwargs):
super(BulkOperationsMixin, self).__init__(*args, **kwargs)
self._active_bulk_ops = ActiveBulkThread(self._bulk_ops_record_type)
@contextmanager
def bulk_operations(self, course_id, emit_signals=True):
"""
A context manager for notifying the store of bulk operations. This affects only the current thread.
In the case of Mongo, it temporarily disables refreshing the metadata inheritance tree
until the bulk operation is completed.
"""
try:
self._begin_bulk_operation(course_id)
yield
finally:
self._end_bulk_operation(course_id, emit_signals)
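    # Usage sketch (illustrative; 'store', 'block_a' and 'block_b' are hypothetical):
    #
    #   with store.bulk_operations(course_key):
    #       store.update_item(block_a, user_id)
    #       with store.bulk_operations(course_key):    # nested: only bumps the count
    #           store.update_item(block_b, user_id)
    #   # the outermost exit calls _end_outermost_bulk_operation exactly once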
# the relevant type of bulk_ops_record for the mixin (overriding classes should override
# this variable)
_bulk_ops_record_type = BulkOpsRecord
def _get_bulk_ops_record(self, course_key, ignore_case=False):
"""
Return the :class:`.BulkOpsRecord` for this course.
"""
if course_key is None:
return self._bulk_ops_record_type()
# Retrieve the bulk record based on matching org/course/run (possibly ignoring case)
if ignore_case:
for key, record in self._active_bulk_ops.records.iteritems():
# Shortcut: check basic equivalence for cases where org/course/run might be None.
if key == course_key or (
key.org.lower() == course_key.org.lower() and
key.course.lower() == course_key.course.lower() and
key.run.lower() == course_key.run.lower()
):
return record
return self._active_bulk_ops.records[course_key.for_branch(None)]
@property
def _active_records(self):
"""
Yield all active (CourseLocator, BulkOpsRecord) tuples.
"""
for course_key, record in self._active_bulk_ops.records.iteritems():
if record.active:
yield (course_key, record)
def _clear_bulk_ops_record(self, course_key):
"""
Clear the record for this course
"""
del self._active_bulk_ops.records[course_key.for_branch(None)]
def _start_outermost_bulk_operation(self, bulk_ops_record, course_key):
"""
The outermost nested bulk_operation call: do the actual begin of the bulk operation.
Implementing classes must override this method; otherwise, the bulk operations are a noop
"""
pass
def _begin_bulk_operation(self, course_key):
"""
Begin a bulk operation on course_key.
"""
bulk_ops_record = self._get_bulk_ops_record(course_key)
# Increment the number of active bulk operations (bulk operations
# on the same course can be nested)
bulk_ops_record.nest()
# If this is the highest level bulk operation, then initialize it
if bulk_ops_record.is_root:
self._start_outermost_bulk_operation(bulk_ops_record, course_key)
def _end_outermost_bulk_operation(self, bulk_ops_record, course_key, emit_signals=True):
"""
The outermost nested bulk_operation call: do the actual end of the bulk operation.
Implementing classes must override this method; otherwise, the bulk operations are a noop
"""
pass
def _end_bulk_operation(self, course_key, emit_signals=True):
"""
End the active bulk operation on course_key.
"""
# If no bulk op is active, return
bulk_ops_record = self._get_bulk_ops_record(course_key)
if not bulk_ops_record.active:
return
bulk_ops_record.unnest()
# If this wasn't the outermost context, then don't close out the
# bulk operation.
if bulk_ops_record.active:
return
self._end_outermost_bulk_operation(bulk_ops_record, course_key, emit_signals)
self._clear_bulk_ops_record(course_key)
def _is_in_bulk_operation(self, course_key, ignore_case=False):
"""
Return whether a bulk operation is active on `course_key`.
"""
return self._get_bulk_ops_record(course_key, ignore_case).active
class EditInfo(object):
"""
Encapsulates the editing info of a block.
"""
def __init__(self, **kwargs):
self.from_storable(kwargs)
# For details, see caching_descriptor_system.py get_subtree_edited_by/on.
self._subtree_edited_on = kwargs.get('_subtree_edited_on', None)
self._subtree_edited_by = kwargs.get('_subtree_edited_by', None)
def to_storable(self):
"""
Serialize to a Mongo-storable format.
"""
return {
'previous_version': self.previous_version,
'update_version': self.update_version,
'source_version': self.source_version,
'edited_on': self.edited_on,
'edited_by': self.edited_by,
'original_usage': self.original_usage,
'original_usage_version': self.original_usage_version,
}
def from_storable(self, edit_info):
"""
De-serialize from Mongo-storable format to an object.
"""
# Guid for the structure which previously changed this XBlock.
# (Will be the previous value of 'update_version'.)
self.previous_version = edit_info.get('previous_version', None)
# Guid for the structure where this XBlock got its current field values.
# May point to a structure not in this structure's history (e.g., to a draft
# branch from which this version was published).
self.update_version = edit_info.get('update_version', None)
self.source_version = edit_info.get('source_version', None)
# Datetime when this XBlock's fields last changed.
self.edited_on = edit_info.get('edited_on', None)
# User ID which changed this XBlock last.
self.edited_by = edit_info.get('edited_by', None)
self.original_usage = edit_info.get('original_usage', None)
self.original_usage_version = edit_info.get('original_usage_version', None)
def __str__(self):
return ("EditInfo(previous_version={0.previous_version}, "
"update_version={0.update_version}, "
"source_version={0.source_version}, "
"edited_on={0.edited_on}, "
"edited_by={0.edited_by}, "
"original_usage={0.original_usage}, "
"original_usage_version={0.original_usage_version}, "
"_subtree_edited_on={0._subtree_edited_on}, "
"_subtree_edited_by={0._subtree_edited_by})").format(self)
class BlockData(object):
"""
Wrap the block data in an object instead of using a straight Python dictionary.
Allows the storing of meta-information about a structure that doesn't persist along with
the structure itself.
"""
def __init__(self, **kwargs):
# Has the definition been loaded?
self.definition_loaded = False
self.from_storable(kwargs)
def to_storable(self):
"""
Serialize to a Mongo-storable format.
"""
return {
'fields': self.fields,
'block_type': self.block_type,
'definition': self.definition,
'defaults': self.defaults,
'edit_info': self.edit_info.to_storable()
}
def from_storable(self, block_data):
"""
De-serialize from Mongo-storable format to an object.
"""
# Contains the Scope.settings and 'children' field values.
# 'children' are stored as a list of (block_type, block_id) pairs.
self.fields = block_data.get('fields', {})
# XBlock type ID.
self.block_type = block_data.get('block_type', None)
# DB id of the record containing the content of this XBlock.
self.definition = block_data.get('definition', None)
# Scope.settings default values copied from a template block (used e.g. when
# blocks are copied from a library to a course)
self.defaults = block_data.get('defaults', {})
# EditInfo object containing all versioning/editing data.
self.edit_info = EditInfo(**block_data.get('edit_info', {}))
def __str__(self):
return ("BlockData(fields={0.fields}, "
"block_type={0.block_type}, "
"definition={0.definition}, "
"definition_loaded={0.definition_loaded}, "
"defaults={0.defaults}, "
"edit_info={0.edit_info})").format(self)
new_contract('BlockData', BlockData)
class IncorrectlySortedList(Exception):
"""
Thrown when calling find() on a SortedAssetList not sorted by filename.
"""
pass
class SortedAssetList(SortedListWithKey):
"""
List of assets that is sorted based on an asset attribute.
"""
def __init__(self, **kwargs):
self.filename_sort = False
key_func = kwargs.get('key', None)
if key_func is None:
kwargs['key'] = itemgetter('filename')
self.filename_sort = True
super(SortedAssetList, self).__init__(**kwargs)
@contract(asset_id=AssetKey)
def find(self, asset_id):
"""
        Find the index of a particular asset in the list. This method is only functional for lists
        sorted by filename. If the list is sorted on any other key, find() raises an
        IncorrectlySortedList exception.
        Returns: Index of asset, if found. None if not found.
"""
# Don't attempt to find an asset by filename in a list that's not sorted by filename.
if not self.filename_sort:
raise IncorrectlySortedList()
# See if this asset already exists by checking the external_filename.
# Studio doesn't currently support using multiple course assets with the same filename.
# So use the filename as the unique identifier.
idx = None
idx_left = self.bisect_left({'filename': asset_id.path})
idx_right = self.bisect_right({'filename': asset_id.path})
if idx_left != idx_right:
# Asset was found in the list.
idx = idx_left
return idx
@contract(asset_md=AssetMetadata)
def insert_or_update(self, asset_md):
"""
Insert asset metadata if asset is not present. Update asset metadata if asset is already present.
"""
metadata_to_insert = asset_md.to_storable()
asset_idx = self.find(asset_md.asset_id)
if asset_idx is None:
# Add new metadata sorted into the list.
self.add(metadata_to_insert)
else:
# Replace existing metadata.
self[asset_idx] = metadata_to_insert
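# Behaviour sketch (added; 'asset_md' is a hypothetical AssetMetadata instance):
#
#   assets = SortedAssetList(iterable=[])      # default key: 'filename'
#   assets.insert_or_update(asset_md)          # inserted in filename order
#   assets.insert_or_update(asset_md)          # same filename -> replaced in place
#   idx = assets.find(asset_md.asset_id)       # bisect on filename; None if absent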
class ModuleStoreAssetBase(object):
"""
The methods for accessing assets and their metadata
"""
def _find_course_asset(self, asset_key):
"""
Returns same as _find_course_assets plus the index to the given asset or None. Does not convert
to AssetMetadata; thus, is internal.
Arguments:
asset_key (AssetKey): what to look for
Returns:
Tuple of:
- AssetMetadata[] for all assets of the given asset_key's type
- the index of asset in list (None if asset does not exist)
"""
course_assets = self._find_course_assets(asset_key.course_key)
all_assets = SortedAssetList(iterable=[])
# Assets should be pre-sorted, so add them efficiently without sorting.
# extend() will raise a ValueError if the passed-in list is not sorted.
all_assets.extend(course_assets.setdefault(asset_key.block_type, []))
idx = all_assets.find(asset_key)
return course_assets, idx
@contract(asset_key='AssetKey')
def find_asset_metadata(self, asset_key, **kwargs):
"""
Find the metadata for a particular course asset.
Arguments:
asset_key (AssetKey): key containing original asset filename
Returns:
asset metadata (AssetMetadata) -or- None if not found
"""
course_assets, asset_idx = self._find_course_asset(asset_key)
if asset_idx is None:
return None
mdata = AssetMetadata(asset_key, asset_key.path, **kwargs)
all_assets = course_assets[asset_key.asset_type]
mdata.from_storable(all_assets[asset_idx])
return mdata
@contract(
course_key='CourseKey', asset_type='None | basestring',
start='int | None', maxresults='int | None', sort='tuple(str,(int,>=1,<=2))|None'
)
def get_all_asset_metadata(self, course_key, asset_type, start=0, maxresults=-1, sort=None, **kwargs):
"""
Returns a list of asset metadata for all assets of the given asset_type in the course.
Args:
course_key (CourseKey): course identifier
asset_type (str): the block_type of the assets to return. If None, return assets of all types.
start (int): optional - start at this asset number. Zero-based!
maxresults (int): optional - return at most this many, -1 means no limit
sort (array): optional - None means no sort
(sort_by (str), sort_order (str))
sort_by - one of 'uploadDate' or 'displayname'
sort_order - one of SortOrder.ascending or SortOrder.descending
Returns:
List of AssetMetadata objects.
"""
course_assets = self._find_course_assets(course_key)
# Determine the proper sort - with defaults of ('displayname', SortOrder.ascending).
key_func = None
sort_order = ModuleStoreEnum.SortOrder.ascending
if sort:
            if sort[0] == 'uploadDate':
# unionrepo.py - repository class for viewing union of repository changesets
#
# Derived from bundlerepo.py
# Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
# Copyright 2013 Unity Technologies, Mads Kiilerich <madski@unity3d.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""Repository class for "in-memory pull" of one local repository to another,
allowing operations like diff and log with revsets.
"""
from node import nullid
from i18n import _
import os
import util, mdiff, cmdutil, scmutil
import localrepo, changelog, manifest, filelog, revlog
class unionrevlog(revlog.revlog):
def __init__(self, opener, indexfile, revlog2, linkmapper):
# How it works:
# To retrieve a revision, we just need to know the node id so we can
# look it up in revlog2.
#
# To differentiate a rev in the second revlog from a rev in the revlog,
# we check revision against repotiprev.
opener = scmutil.readonlyvfs(opener)
revlog.revlog.__init__(self, opener, indexfile)
self.revlog2 = revlog2
n = len(self)
self.repotiprev = n - 1
self.bundlerevs = set() # used by 'bundle()' revset expression
for rev2 in self.revlog2:
rev = self.revlog2.index[rev2]
# rev numbers - in revlog2, very different from self.rev
_start, _csize, _rsize, _base, linkrev, p1rev, p2rev, node = rev
if linkmapper is None: # link is to same revlog
assert linkrev == rev2 # we never link back
link = n
else: # rev must be mapped from repo2 cl to unified cl by linkmapper
link = linkmapper(linkrev)
if node in self.nodemap:
# this happens for the common revlog revisions
self.bundlerevs.add(self.nodemap[node])
continue
p1node = self.revlog2.node(p1rev)
p2node = self.revlog2.node(p2rev)
e = (None, None, None, None,
link, self.rev(p1node), self.rev(p2node), node)
self.index.insert(-1, e)
self.nodemap[node] = n
self.bundlerevs.add(n)
n += 1
def _chunk(self, rev):
if rev <= self.repotiprev:
return revlog.revlog._chunk(self, rev)
return self.revlog2._chunk(self.node(rev))
def revdiff(self, rev1, rev2):
"""return or calculate a delta between two revisions"""
if rev1 > self.repotiprev and rev2 > self.repotiprev:
return self.revlog2.revdiff(
self.revlog2.rev(self.node(rev1)),
self.revlog2.rev(self.node(rev2)))
elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
return self.baserevdiff(rev1, rev2)
return mdiff.textdiff(self.revision(self.node(rev1)),
self.revision(self.node(rev2)))
def revision(self, nodeorrev):
"""return an uncompressed revision of a given node or revision
number.
"""
if isinstance(nodeorrev, int):
rev = nodeorrev
node = self.node(rev)
else:
node = nodeorrev
rev = self.rev(node)
if node == nullid:
return ""
if rev > self.repotiprev:
text = self.revlog2.revision(node)
self._cache = (node, rev, text)
else:
text = self.baserevision(rev)
# already cached
return text
def baserevision(self, nodeorrev):
        # Revlog subclasses may override the 'revision' method to modify the format of
        # content retrieved from the revlog. To use unionrevlog with such a class, one
        # needs to override 'baserevision' and make a more specific call here.
return revlog.revlog.revision(self, nodeorrev)
def baserevdiff(self, rev1, rev2):
# Exists for the same purpose as baserevision.
return revlog.revlog.revdiff(self, rev1, rev2)
def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
raise NotImplementedError
def addgroup(self, revs, linkmapper, transaction):
raise NotImplementedError
def strip(self, rev, minlink):
raise NotImplementedError
def checksize(self):
raise NotImplementedError
class unionchangelog(unionrevlog, changelog.changelog):
def __init__(self, opener, opener2):
changelog.changelog.__init__(self, opener)
linkmapper = None
changelog2 = changelog.changelog(opener2)
unionrevlog.__init__(self, opener, self.indexfile, changelog2,
linkmapper)
def baserevision(self, nodeorrev):
# Although changelog doesn't override 'revision' method, some extensions
# may replace this class with another that does. Same story with
# manifest and filelog classes.
return changelog.changelog.revision(self, nodeorrev)
def baserevdiff(self, rev1, rev2):
return changelog.changelog.revdiff(self, rev1, rev2)
class unionmanifest(unionrevlog, manifest.manifest):
def __init__(self, opener, opener2, linkmapper):
manifest.manifest.__init__(self, opener)
manifest2 = manifest.manifest(opener2)
unionrevlog.__init__(self, opener, self.indexfile, manifest2,
linkmapper)
def baserevision(self, nodeorrev):
return manifest.manifest.revision(self, nodeorrev)
def baserevdiff(self, rev1, rev2):
return manifest.manifest.revdiff(self, rev1, rev2)
class unionfilelog(unionrevlog, filelog.filelog):
def __init__(self, opener, path, opener2, linkmapper, repo):
filelog.filelog.__init__(self, opener, path)
filelog2 = filelog.filelog(opener2, path)
unionrevlog.__init__(self, opener, self.indexfile, filelog2,
linkmapper)
self._repo = repo
def baserevision(self, nodeorrev):
return filelog.filelog.revision(self, nodeorrev)
def baserevdiff(self, rev1, rev2):
return filelog.filelog.revdiff(self, rev1, rev2)
def _file(self, f):
self._repo.file(f)
class unionpeer(localrepo.localpeer):
def canpush(self):
return False
class unionrepository(localrepo.localrepository):
def __init__(self, ui, path, path2):
localrepo.localrepository.__init__(self, ui, path)
self.ui.setconfig('phases', 'publish', False, 'unionrepo')
self._url = 'union:%s+%s' % (util.expandpath(path),
util.expandpath(path2))
self.repo2 = localrepo.localrepository(ui, path2)
@localrepo.unfilteredpropertycache
def changelog(self):
return unionchangelog(self.sopener, self.repo2.sopener)
def _clrev(self, rev2):
"""map from repo2 changelog rev to temporary rev in self.changelog"""
node = self.repo2.changelog.node(rev2)
return self.changelog.rev(node)
@localrepo.unfilteredpropertycache
def manifest(self):
return unionmanifest(self.sopener, self.repo2.sopener,
self._clrev)
def url(self):
return self._url
def file(self, f):
return unionfilelog(self.sopener, f, self.repo2.sopener,
self._clrev, self)
def close(self):
self.repo2.close()
def cancopy(self):
return False
def peer(self):
return unionpeer(self)
def getcwd(self):
return os.getcwd() # always outside the repo
def instance(ui, path, create):
if create:
raise util.Abort(_('cannot create new union repository'))
parentpath = ui.config("bundle", "mainreporoot", "")
if not parentpath:
# try to find the correct path to the working directory repo
parentpath = cmdutil.findrepo(os.getcwd())
if parentpath is None:
parentpath = ''
if parentpath:
# Try to make the full path relative so we get a nice, short URL.
# In particular, we don't want temp dir names in test outputs.
cwd = os.getcwd()
if parentpath == cwd:
parentpath = ''
else:
cwd = os.path.join(cwd,'')
if parentpath.startswith(cwd):
                parentpath = parentpath[len(cwd):]
#
# Copyright (c) 2015-2018 Canonical, Ltd.
#
# This file is part of Talisker
# (see http://github.com/canonical-ols/talisker).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import * # noqa
from collections import OrderedDict
from contextlib import contextmanager
import logging
import logging.handlers
import numbers
import sys
import time
from talisker.context import Context, ContextId
from talisker.util import (
get_errno_fields,
module_cache,
module_dict,
)
__all__ = [
'configure',
'configure_test_logging',
'logging_context',
]
logging_globals = module_dict()
def set_global_extra(extra):
if 'extra' not in logging_globals:
logging_globals['extra'] = OrderedDict()
logging_globals['extra'].update(extra)
def reset_logging():
"""Reset logging config"""
# avoid unclosed file resource warning
for handler in logging.getLogger().handlers:
if getattr(handler, '_debug_handler', False):
handler.stream.close()
logging.getLogger().handlers = []
NOISY_LOGS = {
'requests': logging.WARNING,
}
class LoggingContextProxy():
def __getattr__(self, attr):
return getattr(Context.logging, attr)
@contextmanager
def __call__(self, extra=None, **kwargs):
with Context.logging(extra, **kwargs):
yield
logging_context = LoggingContextProxy()
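# Usage sketch (illustrative): fields pushed onto the context are attached to
# every record logged inside the block by StructuredLogger below.
#
#   with logging_context(job='cleanup'):
#       logging.getLogger(__name__).info('starting')   # record carries job=cleanup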
# backwards compat aliases
def set_logging_context(*args, **kwargs):
Context.logging.push(*args, **kwargs)
extra_logging = logging_context
def add_talisker_handler(level, handler, formatter=None):
if formatter is None:
formatter = StructuredFormatter()
handler.setFormatter(formatter)
handler.setLevel(level)
handler._talisker_handler = True
logging.getLogger().addHandler(handler)
def set_logger_class():
logging.setLoggerClass(StructuredLogger)
logging.getLogger().setLevel(logging.NOTSET)
@module_cache
def get_talisker_handler():
handler = logging.StreamHandler()
handler._root_talisker = True
return handler
def configure(config): # pragma: no cover
"""Configure default logging setup for our services.
This is basically:
- log to stderr
- output hybrid logfmt structured format
- maybe configure debug logging
"""
# avoid duplicate logging
if logging_globals.get('configured'):
return
set_logger_class()
formatter = StructuredFormatter()
if config.colour:
formatter = ColouredFormatter(style=config.colour)
# always INFO to stderr
add_talisker_handler(logging.INFO, get_talisker_handler(), formatter)
configure_warnings(config.devel)
supress_noisy_logs()
# defer this until logging has been set up
logger = logging.getLogger(__name__)
config_extra = {k: v.value for k, v in config.metadata().items() if v.raw}
if config_extra:
logger.info('talisker configured', extra=config_extra)
if config.ERRORS:
errors = {name: str(err) for name, err in config.ERRORS.items()}
logger.error('configuration errors', extra=errors)
if config.debuglog is not None:
if can_write_to_file(config.debuglog):
handler = logging.handlers.TimedRotatingFileHandler(
config.debuglog,
when='D',
interval=1,
backupCount=1,
delay=True,
utc=True,
)
handler._debug_handler = True
add_talisker_handler(logging.DEBUG, handler)
logger.info('enabling debug log', extra={'path': config.debuglog})
else:
logger.info('could not enable debug log, could not write to path',
extra={'path': config.debuglog})
# sentry integration
import talisker.sentry # defer to avoid logging setup
if talisker.sentry.enabled:
sentry_handler = talisker.sentry.get_log_handler()
add_talisker_handler(logging.ERROR, sentry_handler)
logging_globals['configured'] = True
def can_write_to_file(path):
try:
open(path, 'a').close()
except Exception:
return False
else:
return True
def supress_noisy_logs():
"""Set some custom log levels on some sub logs"""
for name, level in NOISY_LOGS.items():
logger = logging.getLogger(name)
logger.setLevel(level)
def configure_warnings(enable):
    # never propagate warnings to root
warnings = logging.getLogger('py.warnings')
warnings.propagate = False
if enable:
warnings.addHandler(logging.StreamHandler())
def configure_test_logging(handler=None):
"""Add a handler (defaults to NullHandler) to root logger.
Prevents unconfigured logging from erroring, and swallows all logging,
which is usually what you want for unit tests. Unit test fixtures can
still add their own loggers to assert against log messages if needed.
"""
set_logger_class()
if handler is None:
handler = logging.NullHandler()
add_talisker_handler(logging.NOTSET, handler)
configure_warnings(True)
def enable_debug_log_stderr():
"""Enables debug logging on stderr
Checks for devel mode."""
logger = logging.getLogger(__name__)
logger.warning('setting stderr logging to DEBUG')
get_talisker_handler().setLevel(logging.DEBUG)
class StructuredLogger(logging.Logger):
"""A logger that handles passing 'extra' arguments to all logging calls.
Supports 3 sources of extra structured data:
    1) global extra, designed to be set once at process start.
    2) context extra, designed to be set per request or job, and can be cleaned
       up afterwards.
3) per call extra, passed by the log call, as per normal logging
e.g. log.info('...', extra={...})
"""
    # sadly, we must subclass and override, rather than use the new
# setLogRecordFactory() in 3.2+, as that does not pass the extra args
# through. Also, we need to support python 2.
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
func=None, extra=None, sinfo=None):
# at this point we have 3 possible sources of extra kwargs
# - log call: extra
# - context : local_context.flat
# - global : logging_globals['extra']
#
# In case of collisions, we append _ to the end of the name, so no data
# is lost. The global ones are more important, so take priority - the
# user supplied keys are the ones renamed if needed
# Also, the ordering is specific - more specific tags first
trailer = None
structured = OrderedDict()
try:
if ContextId.get(None) is None:
context_extra = {}
request_id = None
else:
context_extra = logging_context.flat
request_id = Context.request_id
global_extra = logging_globals.get('extra', {})
if extra:
trailer = extra.pop('trailer', None)
for k, v in extra.items():
if k in context_extra or k in global_extra:
k = k + '_'
structured[k] = v
for k, v in context_extra.items():
if k in global_extra:
k = k + '_'
structured[k] = v
structured.update(global_extra)
if request_id:
structured['request_id'] = request_id
except Exception:
# ensure unexpected error doesn't break logging completely
structured = extra
kwargs = dict(func=func, extra=structured, sinfo=sinfo)
# python 2 doesn't support sinfo parameter
| if sys.version_info[0] == 2: | 897 | lcc_e | python | null | 17f8060bda3eea4e6b94e07ca7850afadc2a42895f40a08a |
|
"""
Step definitions for working with Django models.
"""
from datetime import datetime
import re
from django.core.management import call_command
from django.core.management.color import no_style
from django.db import connection
from django.db.models.loading import get_models
from django.utils.functional import curry
from functools import wraps
from lettuce import step
STEP_PREFIX = r'(?:Given|And|Then|When) '
def _models_generator():
"""
Build a hash of model verbose names to models
"""
for model in get_models():
yield (unicode(model._meta.verbose_name), model)
yield (unicode(model._meta.verbose_name_plural), model)
MODELS = dict(_models_generator())
_WRITE_MODEL = {}
def creates_models(model):
"""
Register a model-specific creation function. Wrapper around writes_models
that removes the field parameter (always a create operation).
"""
def decorated(func):
@wraps(func)
@writes_models(model)
def wrapped(data, field):
if field:
raise NotImplementedError(
"Must use the writes_models decorator to update models")
return func(data)
return decorated
def writes_models(model):
"""
Register a model-specific create and update function.
"""
def decorated(func):
"""
Decorator for the creation function.
"""
_WRITE_MODEL[model] = func
return func
return decorated
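# Editor's illustrative sketch (not part of the original module): registering a
# writer for a hypothetical model. The import and field names are assumptions;
# the function is never called here, it only shows the intended wiring.
def _example_register_profile_writer():
    from myapp.models import Profile  # hypothetical model

    @writes_models(Profile)
    def write_profiles(data, field):
        # `data` is a list of row dicts from the scenario table; `field`, when
        # given, names the lookup column used to update existing rows.
        return write_models(Profile, data, field)

    return write_profiles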
_MODEL_EXISTS = {}
def checks_existence(model):
"""
Register a model-specific existence check function.
"""
def decorated(func):
"""
Decorator for the existence function.
"""
_MODEL_EXISTS[model] = func
return func
return decorated
def hash_data(hash_):
"""
Convert strings from a Lettuce hash to appropriate types
"""
res = {}
for key, value in hash_.items():
if type(value) in (str, unicode):
if value == "true":
value = True
elif value == "false":
value = False
elif value == "null":
value = None
elif value.isdigit() and not re.match("^0[0-9]+", value):
value = int(value)
elif re.match(r'^\d{4}-\d{2}-\d{2}$', value):
value = datetime.strptime(value, "%Y-%m-%d")
res[key] = value
return res
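# Editor's illustrative sketch (not part of the original module): hash_data
# turns the string cells of a Lettuce table row into Python values.
def _example_hash_data():
    # -> {'age': 7, 'admin': True, 'left': None,
    #     'joined': datetime(2013, 2, 1, 0, 0)}
    return hash_data({
        'age': '7',
        'admin': 'true',
        'left': 'null',
        'joined': '2013-02-01',
    })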
def hashes_data(step):
"""
Convert strings from step hashes to appropriate types
"""
return [hash_data(hash_) for hash_ in step.hashes]
def get_model(model):
"""
    Convert a model's verbose name to the model class. This allows us to
    use the model's verbose name in steps.
"""
name = model.lower()
    model = MODELS.get(name, None)
assert model, "Could not locate model by name '%s'" % name
return model
def reset_sequence(model):
"""
Reset the ID sequence for a model.
"""
sql = connection.ops.sequence_reset_sql(no_style(), [model])
for cmd in sql:
connection.cursor().execute(cmd)
def create_models(model, data):
"""
Create models for each data hash. Wrapper around write_models.
"""
return write_models(model, data, None)
def write_models(model, data, field=None):
"""
Create or update models for each data hash. If field is present, it is the
field that is used to get the existing models out of the database to update
them; otherwise, new models are created.
"""
if hasattr(data, 'hashes'):
data = hashes_data(data)
written = []
for hash_ in data:
if field:
if field not in hash_:
raise KeyError(("The \"%s\" field is required for all update "
"operations") % field)
model_kwargs = {field: hash_[field]}
model_obj = model.objects.get(**model_kwargs)
for to_set, val in hash_.items():
setattr(model_obj, to_set, val)
model_obj.save()
else:
model_obj = model.objects.create(**hash_)
written.append(model_obj)
reset_sequence(model)
return written
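# Editor's illustrative sketch (not part of the original module): updating
# existing rows by a lookup column with write_models; the model and field
# names are assumptions for illustration only.
def _example_update_users_by_username(user_model):
    rows = [{'username': 'alice', 'email': 'alice@example.com'}]
    # Each row must carry the lookup field; the matching object is fetched,
    # its attributes updated and saved, then the id sequence is reset.
    return write_models(user_model, rows, field='username')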
def _dump_model(model, attrs=None):
"""
Dump the model fields for debugging.
"""
for field in model._meta.fields:
print '%s=%s,' % (field.name, str(getattr(model, field.name))),
if attrs is not None:
for attr in attrs:
print '%s=%s,' % (attr, str(getattr(model, attr))),
for field in model._meta.many_to_many:
vals = getattr(model, field.name)
print '%s=%s (%i),' % (
field.name,
', '.join(map(str, vals.all())),
vals.count()),
print
def models_exist(model, data, queryset=None):
"""
Check whether the models defined by @data exist in the @queryset.
"""
if hasattr(data, 'hashes'):
data = hashes_data(data)
if not queryset:
queryset = model.objects
failed = 0
try:
for hash_ in data:
fields = {}
extra_attrs = {}
| for k, v in hash_.iteritems(): | 546 | lcc_e | python | null | 7ae84ee3dc227ab3ad301914db5aa12c8e0ecb2042da37aa |
|
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0103
"""Invenio BibEdit Administrator Interface."""
__revision__ = "$Id"
__lastupdated__ = """$Date: 2008/08/12 09:26:46 $"""
import cProfile
import cStringIO
import pstats
from invenio.jsonutils import json, json_unicode_to_utf8
from invenio.access_control_engine import acc_authorize_action
from invenio.bibedit_engine import (perform_request_ajax,
perform_request_init,
perform_request_newticket,
perform_request_compare,
perform_request_init_template_interface,
perform_request_ajax_template_interface)
from invenio.bibedit_utils import user_can_edit_record_collection
from invenio.config import CFG_SITE_LANG, CFG_SITE_SECURE_URL, CFG_SITE_RECORD
from invenio.messages import gettext_set_language
from invenio.urlutils import redirect_to_url
from invenio.webinterface_handler import WebInterfaceDirectory, wash_urlargd
from invenio.webpage import page
from invenio.webuser import collect_user_info, getUid, page_not_authorized
navtrail = (' <a class="navtrail" href=\"%s/help/admin\">Admin Area</a> '
) % CFG_SITE_SECURE_URL
navtrail_bibedit = (' <a class="navtrail" href=\"%s/help/admin\">Admin Area</a> ' + \
' > <a class="navtrail" href=\"%s/%s/edit\">Record Editor</a>'
) % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, CFG_SITE_RECORD)
def wrap_json_req_profiler(func):
def json_req_profiler(self, req, form):
if "ajaxProfile" in form:
profiler = cProfile.Profile()
return_val = profiler.runcall(func, self, req, form)
results = cStringIO.StringIO()
stats = pstats.Stats(profiler, stream=results)
stats.sort_stats('cumulative')
stats.print_stats(100)
json_in = json.loads(str(form['jsondata']))
# Deunicode all strings (Invenio doesn't have unicode
# support).
json_in = json_unicode_to_utf8(json_in)
json_data = json.loads(return_val)
json_data.update({"profilerStats": "<pre style='overflow: scroll'>" + json_in['requestType'] + results.getvalue() + "</pre>"})
return json.dumps(json_data)
else:
return func(self, req, form)
return json_req_profiler
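# Editor's illustrative note (not part of the original module): any handler
# wrapped by the decorator above can be profiled by including an "ajaxProfile"
# field in the posted form; the JSON reply then carries an extra
# "profilerStats" entry with the cProfile output. A hypothetical request:
#
#   POST /record/1/edit/?ajaxProfile=1   with the usual jsondata payload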
class WebInterfaceEditPages(WebInterfaceDirectory):
"""Defines the set of /edit pages."""
_exports = ['', 'new_ticket', 'compare_revisions', 'templates']
def __init__(self, recid=None):
"""Initialize."""
self.recid = recid
@wrap_json_req_profiler
def index(self, req, form):
"""Handle all BibEdit requests.
        The responsibilities of this function are:
* JSON decoding and encoding.
* Redirection, if necessary.
* Authorization.
* Calling the appropriate function from the engine.
"""
uid = getUid(req)
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
# If it is an Ajax request, extract any JSON data.
ajax_request, recid = False, None
if form.has_key('jsondata'):
json_data = json.loads(str(form['jsondata']))
# Deunicode all strings (Invenio doesn't have unicode
# support).
json_data = json_unicode_to_utf8(json_data)
ajax_request = True
if json_data.has_key('recID'):
recid = json_data['recID']
json_response = {'resultCode': 0, 'ID': json_data['ID']}
# Authorization.
user_info = collect_user_info(req)
if user_info['email'] == 'guest':
# User is not logged in.
if not ajax_request:
# Do not display the introductory recID selection box to guest
# users (as it used to be with v0.99.0):
dummy_auth_code, auth_message = acc_authorize_action(req,
'runbibedit')
referer = '/edit/'
if self.recid:
referer = '/%s/%s/edit/' % (CFG_SITE_RECORD, self.recid)
return page_not_authorized(req=req, referer=referer,
text=auth_message, navtrail=navtrail)
else:
# Session has most likely timed out.
json_response.update({'resultCode': 100})
return json.dumps(json_response)
elif self.recid:
# Handle redirects from /record/<record id>/edit
# generic URL.
redirect_to_url(req, '%s/%s/edit/#state=edit&recid=%s&recrev=%s' % (
CFG_SITE_SECURE_URL, CFG_SITE_RECORD, self.recid, ""))
elif recid is not None:
json_response.update({'recID': recid})
if json_data['requestType'] == "getRecord":
# Authorize access to record.
if not user_can_edit_record_collection(req, recid):
json_response.update({'resultCode': 101})
return json.dumps(json_response)
# Handle request.
if not ajax_request:
# Show BibEdit start page.
body, errors, warnings = perform_request_init(uid, argd['ln'], req, __lastupdated__)
title = 'Record Editor'
return page(title = title,
body = body,
errors = errors,
warnings = warnings,
uid = uid,
language = argd['ln'],
navtrail = navtrail,
lastupdated = __lastupdated__,
req = req,
body_css_classes = ['bibedit'])
else:
# Handle AJAX request.
json_response.update(perform_request_ajax(req, recid, uid,
json_data))
return json.dumps(json_response)
def compare_revisions(self, req, form):
"""Handle the compare revisions request"""
argd = wash_urlargd(form, { \
'ln': (str, CFG_SITE_LANG), \
'rev1' : (str, ''), \
'rev2' : (str, ''), \
'recid': (int, 0)})
ln = argd['ln']
uid = getUid(req)
_ = gettext_set_language(ln)
# Checking if currently logged user has permission to perform this request
auth_code, auth_message = acc_authorize_action(req, 'runbibedit')
if auth_code != 0:
return page_not_authorized(req=req, referer="/edit",
text=auth_message, navtrail=navtrail)
recid = argd['recid']
rev1 = argd['rev1']
rev2 = argd['rev2']
ln = argd['ln']
body, errors, warnings = perform_request_compare(ln, recid, rev1, rev2)
return page(title = _("Comparing two record revisions"),
body = body,
errors = errors,
warnings = warnings,
uid = uid,
language = ln,
navtrail = navtrail,
lastupdated = __lastupdated__,
req = req,
body_css_classes = ['bibedit'])
def new_ticket(self, req, form):
"""handle a edit/new_ticket request"""
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG), 'recid': (int, 0)})
ln = argd['ln']
_ = gettext_set_language(ln)
auth_code, auth_message = acc_authorize_action(req, 'runbibedit')
if auth_code != 0:
| return page_not_authorized(req=req, referer="/edit", | 756 | lcc_e | python | null | 04e295ceba60a64b82588ffaa2a96d5c35e2e469380e6944 |
|
# Copyright (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright 2015 Abhijit Menon-Sen <ams@2ndQuadrant.com>
# Copyright 2017 Toshio Kuratomi <tkuratomi@ansible.com>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
connection: ssh
short_description: connect via ssh client binary
description:
- This connection plugin allows ansible to communicate to the target machines via normal ssh command line.
- Ansible does not expose a channel to allow communication between the user and the ssh process to accept
a password manually to decrypt an ssh key when using this connection plugin (which is the default). The
use of ``ssh-agent`` is highly recommended.
author: ansible (@core)
version_added: historical
options:
host:
description: Hostname/ip to connect to.
default: inventory_hostname
vars:
- name: ansible_host
- name: ansible_ssh_host
host_key_checking:
description: Determines if ssh should check host keys
type: boolean
ini:
- section: defaults
key: 'host_key_checking'
- section: ssh_connection
key: 'host_key_checking'
version_added: '2.5'
env:
- name: ANSIBLE_HOST_KEY_CHECKING
- name: ANSIBLE_SSH_HOST_KEY_CHECKING
version_added: '2.5'
vars:
- name: ansible_host_key_checking
version_added: '2.5'
- name: ansible_ssh_host_key_checking
version_added: '2.5'
password:
description: Authentication password for the C(remote_user). Can be supplied as CLI option.
vars:
- name: ansible_password
- name: ansible_ssh_pass
- name: ansible_ssh_password
ssh_args:
description: Arguments to pass to all ssh cli tools
default: '-C -o ControlMaster=auto -o ControlPersist=60s'
ini:
- section: 'ssh_connection'
key: 'ssh_args'
env:
- name: ANSIBLE_SSH_ARGS
vars:
- name: ansible_ssh_args
version_added: '2.7'
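          # Editor's illustrative sketch (not part of the original plugin doc):
          # the same value can be supplied via any of the sources above, e.g.
          #   ansible.cfg:      [ssh_connection]
          #                     ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s
          #   environment:      ANSIBLE_SSH_ARGS="-C -o ControlMaster=auto -o ControlPersist=60s"
          #   inventory/host:   ansible_ssh_args: "-C -o ControlMaster=auto -o ControlPersist=60s"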
ssh_common_args:
description: Common extra args for all ssh CLI tools
ini:
- section: 'ssh_connection'
key: 'ssh_common_args'
version_added: '2.7'
env:
- name: ANSIBLE_SSH_COMMON_ARGS
version_added: '2.7'
vars:
- name: ansible_ssh_common_args
ssh_executable:
default: ssh
description:
- This defines the location of the ssh binary. It defaults to ``ssh`` which will use the first ssh binary available in $PATH.
- This option is usually not required, it might be useful when access to system ssh is restricted,
or when using ssh wrappers to connect to remote hosts.
env: [{name: ANSIBLE_SSH_EXECUTABLE}]
ini:
- {key: ssh_executable, section: ssh_connection}
#const: ANSIBLE_SSH_EXECUTABLE
version_added: "2.2"
vars:
- name: ansible_ssh_executable
version_added: '2.7'
sftp_executable:
default: sftp
description:
- This defines the location of the sftp binary. It defaults to ``sftp`` which will use the first binary available in $PATH.
env: [{name: ANSIBLE_SFTP_EXECUTABLE}]
ini:
- {key: sftp_executable, section: ssh_connection}
version_added: "2.6"
vars:
- name: ansible_sftp_executable
version_added: '2.7'
scp_executable:
default: scp
description:
- This defines the location of the scp binary. It defaults to `scp` which will use the first binary available in $PATH.
env: [{name: ANSIBLE_SCP_EXECUTABLE}]
ini:
- {key: scp_executable, section: ssh_connection}
version_added: "2.6"
vars:
- name: ansible_scp_executable
version_added: '2.7'
scp_extra_args:
description: Extra exclusive to the ``scp`` CLI
vars:
- name: ansible_scp_extra_args
env:
- name: ANSIBLE_SCP_EXTRA_ARGS
version_added: '2.7'
ini:
- key: scp_extra_args
section: ssh_connection
version_added: '2.7'
sftp_extra_args:
description: Extra exclusive to the ``sftp`` CLI
vars:
- name: ansible_sftp_extra_args
env:
- name: ANSIBLE_SFTP_EXTRA_ARGS
version_added: '2.7'
ini:
- key: sftp_extra_args
section: ssh_connection
version_added: '2.7'
ssh_extra_args:
description: Extra exclusive to the 'ssh' CLI
vars:
- name: ansible_ssh_extra_args
env:
- name: ANSIBLE_SSH_EXTRA_ARGS
version_added: '2.7'
ini:
- key: ssh_extra_args
section: ssh_connection
version_added: '2.7'
retries:
# constant: ANSIBLE_SSH_RETRIES
description: Number of attempts to connect.
default: 3
type: integer
env:
- name: ANSIBLE_SSH_RETRIES
ini:
- section: connection
key: retries
- section: ssh_connection
key: retries
vars:
- name: ansible_ssh_retries
version_added: '2.7'
port:
description: Remote port to connect to.
type: int
default: 22
ini:
- section: defaults
key: remote_port
env:
- name: ANSIBLE_REMOTE_PORT
vars:
- name: ansible_port
- name: ansible_ssh_port
remote_user:
description:
- User name with which to login to the remote server, normally set by the remote_user keyword.
              - If no user is supplied, Ansible will let the ssh client binary choose the user as it normally does.
ini:
- section: defaults
key: remote_user
env:
- name: ANSIBLE_REMOTE_USER
vars:
- name: ansible_user
- name: ansible_ssh_user
pipelining:
default: ANSIBLE_PIPELINING
description:
- Pipelining reduces the number of SSH operations required to execute a module on the remote server,
by executing many Ansible modules without actual file transfer.
- This can result in a very significant performance improvement when enabled.
- However this conflicts with privilege escalation (become).
For example, when using sudo operations you must first disable 'requiretty' in the sudoers file for the target hosts,
which is why this feature is disabled by default.
env:
- name: ANSIBLE_PIPELINING
#- name: ANSIBLE_SSH_PIPELINING
ini:
- section: defaults
key: pipelining
#- section: ssh_connection
# key: pipelining
type: boolean
vars:
- name: ansible_pipelining
- name: ansible_ssh_pipelining
private_key_file:
description:
- Path to private key file to use for authentication
ini:
- section: defaults
key: private_key_file
env:
- name: ANSIBLE_PRIVATE_KEY_FILE
vars:
- name: ansible_private_key_file
- name: ansible_ssh_private_key_file
control_path:
description:
- This is the location to save ssh's ControlPath sockets, it uses ssh's variable substitution.
- Since 2.3, if null, ansible will generate a unique hash. Use `%(directory)s` to indicate where to use the control dir path setting.
env:
- name: ANSIBLE_SSH_CONTROL_PATH
ini:
- key: control_path
section: ssh_connection
vars:
- name: ansible_control_path
version_added: '2.7'
control_path_dir:
default: ~/.ansible/cp
description:
- This sets the directory to use for ssh control path if the control path setting is null.
- Also, provides the `%(directory)s` variable for the control path setting.
env:
- name: ANSIBLE_SSH_CONTROL_PATH_DIR
ini:
- section: ssh_connection
key: control_path_dir
vars:
- name: ansible_control_path_dir
version_added: '2.7'
sftp_batch_mode:
default: 'yes'
description: 'TODO: write it'
env: [{name: ANSIBLE_SFTP_BATCH_MODE}]
ini:
- {key: sftp_batch_mode, section: ssh_connection}
type: bool
vars:
- name: ansible_sftp_batch_mode
version_added: '2.7'
scp_if_ssh:
default: smart
description:
- "Prefered method to use when transfering files over ssh"
- When set to smart, Ansible will try them until one succeeds or they all fail
- If set to True, it will force 'scp', if False it will use 'sftp'
env: [{name: ANSIBLE_SCP_IF_SSH}]
ini:
- {key: scp_if_ssh, section: ssh_connection}
vars:
- name: ansible_scp_if_ssh
version_added: '2.7'
use_tty:
version_added: '2.5'
default: 'yes'
description: add -tt to ssh commands to force tty allocation
env: [{name: ANSIBLE_SSH_USETTY}]
ini:
| - {key: usetty, section: ssh_connection} | 968 | lcc_e | python | null | 372cbea864b598e206205434a3c9016ac96913fb9e29fd56 |
|
# -*- coding: utf-8 -*-
##
## This file is part of Harvesting Kit.
## Copyright (C) 2013, 2014, 2015 CERN.
##
## Harvesting Kit is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Harvesting Kit is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Harvesting Kit; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import re
import sys
import time
from os import pardir
from os.path import (join,
dirname,
basename)
try:
from invenio.errorlib import register_exception
except ImportError:
register_exception = lambda a=1, b=2: True
from harvestingkit.minidom_utils import (get_value_in_tag,
xml_to_text)
from harvestingkit.utils import (format_arxiv_id,
add_nations_field)
from harvestingkit.bibrecord import (
record_add_field,
create_record,
record_xml_output,
)
from xml.dom.minidom import parse
class JATSParser(object):
def __init__(self, tag_to_remove=None, extract_nations=False):
self.references = None
self.tag_to_remove = tag_to_remove
self.extract_nations = extract_nations
def get_article(self, path):
return parse(open(path))
def get_title(self, xml):
try:
return get_value_in_tag(xml, "article-title", tag_to_remove=self.tag_to_remove)
except Exception:
print >> sys.stderr, "Can't find title"
def get_issn(self, xml):
issns = xml.getElementsByTagName('issn')
ret = None
for issn in issns:
if issn.getAttribute('date-type').encode('utf-8') == 'epub' or issn.getAttribute('pub-type').encode('utf-8') == 'epub':
if issn.getAttribute("pub-type").encode('utf-8'):
ret = issn.getAttribute("pub-type").encode('utf-8')
else:
ret = issn.getAttribute("date-type").encode('utf-8')
if not ret and issns:
ret = xml_to_text(issns[0])
return ret
def get_date(self, xml):
dates = xml.getElementsByTagName('pub-date')
ret = None
for date in dates:
if date.getAttribute('date-type').encode('utf-8') == 'epub' or date.getAttribute('pub-type').encode('utf-8') == 'epub':
ret = get_value_in_tag(date, 'year')
if not ret and dates:
return dates[0]
else:
return ret
def get_publication_information(self, xml):
jid = get_value_in_tag(xml, "journal-title")
journal = ""
if "European Physical Journal" in jid:
journal = "EPJC"
try:
art = xml.getElementsByTagName('article-meta')[0]
except IndexError as err:
register_exception()
print >> sys.stderr, "ERROR: XML corrupted: %s" % err
pass
except Exception as err:
register_exception()
print >> sys.stderr, "ERROR: Exception captured: %s" % err
pass
issn = self.get_issn(art)
volume = get_value_in_tag(art, "volume")
issue = get_value_in_tag(art, "issue")
year = self.get_date(art)
first_page = get_value_in_tag(art, "fpage")
last_page = get_value_in_tag(art, "lpage")
doi = self.get_doi(art)
return (journal, issn, volume, issue, first_page, last_page, year, doi)
def get_doi(self, xml):
ids = xml.getElementsByTagName('article-id')
ret = ""
for i in ids:
if i.getAttribute('pub-id-type').encode('utf-8') == 'doi':
ret = xml_to_text(i)
if not ret:
print >> sys.stdout, "Can't find DOI."
return ret
def _get_orcid(self, xml_author):
try:
contrib_id = xml_author.getElementsByTagName('contrib-id')[0]
if contrib_id.getAttribute('contrib-id-type') == 'orcid':
orcid_raw = xml_to_text(contrib_id)
pattern = '\d\d\d\d-\d\d\d\d-\d\d\d\d-\d\d\d[\d|X]'
return re.search(pattern, orcid_raw).group()
except (IndexError, AttributeError):
return None
def get_authors(self, xml):
authors = []
for author in xml.getElementsByTagName("contrib"):
            # Springer puts collaborations in an additional "contrib" tag, so
            # to avoid creating a fake author carrying all affiliations we skip
            # any "contrib" tag that has "contrib" subtags.
if author.getElementsByTagName("contrib"):
continue
tmp = {}
surname = get_value_in_tag(author, "surname")
if surname:
tmp["surname"] = surname
given_name = get_value_in_tag(author, "given-names")
if given_name:
tmp["given_name"] = given_name.replace('\n', ' ')
if not surname and not given_name:
tmp["name"] = get_value_in_tag(author, "string-name")
# It's not there yet
orcid = self._get_orcid(author)
if orcid:
tmp["orcid"] = 'ORCID:{0}'.format(orcid)
# cross_refs = author.getElementsByTagName("ce:cross-ref")
# if cross_refs:
# tmp["cross_ref"] = []
# for cross_ref in cross_refs:
# tmp["cross_ref"].append(cross_ref.getAttribute("refid").encode('utf-8'))
tmp["affiliations_ids"] = []
tmp["contact_ids"] = []
xrefs = author.getElementsByTagName("xref")
for x in xrefs:
if x.getAttribute('ref-type').encode('utf-8') == 'aff':
tmp["affiliations_ids"].extend([a.encode('utf-8') for a in x.getAttribute('rid').split()])
if x.getAttribute('ref-type').encode('utf-8') == 'corresp':
tmp["contact_ids"].extend([a.encode('utf-8') for a in x.getAttribute('rid').split()])
authors.append(tmp)
affiliations = {}
for affiliation in xml.getElementsByTagName("aff"):
aff_id = affiliation.getAttribute("id").encode('utf-8')
            # remove numbering from affiliations
text = re.sub(r'^(\d+,\ ?)', "", xml_to_text(affiliation, delimiter=", "))
affiliations[aff_id] = text
emails = {}
for contact in xml.getElementsByTagName("corresp"):
contact_id = contact.getAttribute("id").encode('utf-8')
if contact.getElementsByTagName('email'):
text = xml_to_text(contact.getElementsByTagName('email')[0])
emails[contact_id] = text
implicit_affilations = True
for author in authors:
matching_ref = [ref for ref in author.get("affiliations_ids") if ref in affiliations]
if matching_ref:
implicit_affilations = False
author["affiliation"] = []
for i in xrange(0, len(matching_ref)):
author["affiliation"].append(affiliations[matching_ref[i]])
matching_contact = [cont for cont in author.get('contact_ids') if cont in emails]
if matching_contact:
author["email"] = emails[matching_contact[0]]
if implicit_affilations and len(affiliations) > 1:
print >> sys.stderr, "Implicit affiliations are used, but there are more than one affiliation: %s" % affiliations
if implicit_affilations and len(affiliations) >= 1:
for author in authors:
author["affiliation"] = []
for aff in affiliations.values():
author["affiliation"].append(aff)
return authors
def get_abstract(self, xml):
try:
return get_value_in_tag(xml, "abstract", tag_to_remove=self.tag_to_remove).replace("Abstract", "", 1)
except Exception:
print >> sys.stderr, "Can't find abstract"
def get_copyright(self, xml, logger=None):
tags = ["copyright-holder", "copyright-statement"]
for tag in tags:
if tag is "copyright-holder":
ret = get_value_in_tag(xml, tag)
if not ret:
if logger:
logger.info("Can't find copyright, trying different tag.")
print >> sys.stderr, "Can't find copyright, trying different tag."
else:
return ret
else:
ret = get_value_in_tag(xml, tag)
if not ret:
if logger:
logger.info("Can't find copyright")
print >> sys.stderr, "Can't find copyright"
else:
ret = ret.split('.')
return ret[0]
def get_keywords(self, xml):
try:
kwd_groups = xml.getElementsByTagName('kwd-group')
pacs = []
other = []
for kwd_group in kwd_groups:
if kwd_group.getAttribute('kwd-group-type').encode('utf-8') == "pacs":
pacs = [xml_to_text(keyword, tag_to_remove=self.tag_to_remove) for keyword in kwd_group.getElementsByTagName("kwd")]
else:
other = [xml_to_text(keyword, tag_to_remove=self.tag_to_remove) for keyword in kwd_group.getElementsByTagName("kwd")]
return {"pacs": pacs, "other": other}
except Exception:
print >> sys.stderr, "Can't find keywords"
def get_ref_link(self, xml, name):
links = xml.getElementsByTagName('ext-link')
ret = None
for link in links:
if name in link.getAttribute("xlink:href").encode('utf-8'):
ret = xml_to_text(link).strip()
if not ret:
links = xml.getElementsByTagName('elocation-id')
for link in links:
if name in link.getAttribute("content-type").encode('utf-8'):
ret = xml_to_text(link).strip()
return ret
def get_page_count(self, xml):
counts = xml.getElementsByTagName("counts")
if counts:
page_count = counts[0].getElementsByTagName("page-count")
if page_count:
return page_count[0].getAttribute("count").encode('utf-8')
else:
return None
else:
return None
def get_publication_date(self, xml, logger=None):
date_xmls = xml.getElementsByTagName('pub-date')
day = None
month = None
year = None
if date_xmls:
for date_xml in date_xmls:
if date_xml.hasAttribute('pub-type'):
if date_xml.getAttribute('pub-type') == "epub":
day = get_value_in_tag(date_xml, "day")
month = get_value_in_tag(date_xml, "month")
year = get_value_in_tag(date_xml, "year")
if not year:
day = get_value_in_tag(date_xml, "day")
month = get_value_in_tag(date_xml, "month")
year = get_value_in_tag(date_xml, "year")
if logger:
logger.info('%s-%s-%s' % (year, month, day))
return '%s-%s-%s' % (year, month, day)
else:
print >> sys.stderr, "Can't find publication date. Using 'today'."
if logger:
logger.info("Can't find publication date. Using 'today'.")
return time.strftime('%Y-%m-%d')
def get_references(self, xml):
references = []
for reference in xml.getElementsByTagName("ref"):
plain_text = None
try:
ref_type = reference.getElementsByTagName('mixed-citation')[0]
ref_type = ref_type.getAttribute('publication-type').encode('utf-8')
except:
ref_type = reference.getElementsByTagName('citation')[0]
ref_type = ref_type.getAttribute('publication-type').encode('utf-8')
label = get_value_in_tag(reference, "label").strip('.')
authors = []
for author in reference.getElementsByTagName("name"):
given_name = get_value_in_tag(author, "given-names")
surname = get_value_in_tag(author, "surname")
if given_name:
name = "%s, %s" % (surname, given_name)
else:
name = surname
if name.strip().split() == []:
name = get_value_in_tag(author, "string-name")
authors.append(name)
doi_tag = reference.getElementsByTagName("pub-id")
doi = ""
for tag in doi_tag:
if tag.getAttribute("pub-id-type") == "doi":
doi = xml_to_text(tag)
issue = get_value_in_tag(reference, "issue")
page = get_value_in_tag(reference, "fpage")
page_last = get_value_in_tag(reference, "lpage")
title = get_value_in_tag(reference, "source")
volume = get_value_in_tag(reference, "volume")
year = get_value_in_tag(reference, "year")
ext_link = format_arxiv_id(self.get_ref_link(reference, "arxiv"))
if ref_type != 'journal':
try:
plain_text = get_value_in_tag(reference,
"mixed-citation",
tag_to_remove=self.tag_to_remove)
except:
plain_text = get_value_in_tag(reference,
"citation",
tag_to_remove=self.tag_to_remove)
references.append((label, authors, doi,
issue, page, page_last,
title, volume, year,
ext_link, plain_text))
self.references = references
def get_record(self, f_path, publisher=None, collection=None, logger=None):
xml = self.get_article(f_path)
rec = create_record()
title = self.get_title(xml)
if title:
record_add_field(rec, '245', subfields=[('a', title)])
record_add_field(rec, '260', subfields=[('c', self.get_publication_date(xml, logger))])
journal, issn, volume, issue, first_page, last_page, year, doi = self.get_publication_information(xml)
if logger:
logger.info("Creating record: %s %s" % (join(f_path, pardir), doi))
if doi:
record_add_field(rec, '024', ind1='7', subfields=[('a', doi), ('2', 'DOI')])
authors = self.get_authors(xml)
first_author = True
for author in authors:
if author.get('surname'):
subfields = [('a', '%s, %s' % (author.get('surname'), author.get('given_name') or author.get('initials', '')))]
else:
subfields = [('a', '%s' % (author.get('name', '')))]
if 'orcid' in author:
subfields.append(('j', author['orcid']))
if 'affiliation' in author:
for aff in author["affiliation"]:
subfields.append(('v', aff))
if self.extract_nations:
add_nations_field(subfields)
if author.get('email'):
subfields.append(('m', author['email']))
if first_author:
record_add_field(rec, '100', subfields=subfields)
first_author = False
else:
record_add_field(rec, '700', subfields=subfields)
page_count = self.get_page_count(xml)
if page_count:
| record_add_field(rec, '300', subfields=[('a', page_count)]) | 1,283 | lcc_e | python | null | 86663145e9262154f0a3a437a533c6207187300f593522f9 |
|
# coding: utf8
# This file is part of Scapy
# Scapy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# Scapy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Scapy. If not, see <http://www.gnu.org/licenses/>.
# scapy.contrib.description = EtherNet/IP
# scapy.contrib.status = loads
# Copyright (C) 2019 Jose Diogo Monteiro <jdlopes@student.dei.uc.pt>
# Based on https://github.com/scy-phy/scapy-cip-enip
# Routines for EtherNet/IP (Industrial Protocol) dissection
# EtherNet/IP Home: www.odva.org
import struct
from scapy.packet import Packet, bind_layers
from scapy.layers.inet import TCP
from scapy.fields import LEShortField, LEShortEnumField, LEIntEnumField, \
LEIntField, LELongField, FieldLenField, PacketListField, ByteField, \
PacketField, MultipleTypeField, StrLenField, StrFixedLenField, \
XLEIntField, XLEStrLenField
_commandIdList = {
0x0004: "ListServices", # Request Struct Don't Have Command Spec Data
0x0063: "ListIdentity", # Request Struct Don't Have Command Spec Data
0x0064: "ListInterfaces", # Request Struct Don't Have Command Spec Data
0x0065: "RegisterSession", # Request Structure = Reply Structure
0x0066: "UnregisterSession", # Don't Have Command Specific Data
0x006f: "SendRRData", # Request Structure = Reply Structure
0x0070: "SendUnitData", # There is no reply
0x0072: "IndicateStatus",
0x0073: "Cancel"
}
_statusList = {
0: "success",
1: "invalid_cmd",
2: "no_resources",
3: "incorrect_data",
100: "invalid_session",
101: "invalid_length",
105: "unsupported_prot_rev"
}
_itemID = {
0x0000: "Null Address Item",
0x00a1: "Connection-based Address Item",
0x00b1: "Connected Transport packet Data Item",
0x00b2: "Unconnected message Data Item",
0x8000: "Sockaddr Info, originator-to-target Data Item",
0x8001: "Sockaddr Info, target-to-originator Data Item"
}
class ItemData(Packet):
"""Common Packet Format"""
name = "Item Data"
fields_desc = [
LEShortEnumField("typeId", 0, _itemID),
LEShortField("length", 0),
XLEStrLenField("data", "", length_from=lambda pkt: pkt.length),
]
def extract_padding(self, s):
return '', s
class EncapsulatedPacket(Packet):
"""Encapsulated Packet"""
name = "Encapsulated Packet"
fields_desc = [LEShortField("itemCount", 2), PacketListField(
"item", None, ItemData, count_from=lambda pkt: pkt.itemCount), ]
class BaseSendPacket(Packet):
""" Abstract Class"""
fields_desc = [
LEIntField("interfaceHandle", 0),
LEShortField("timeout", 0),
PacketField("encapsulatedPacket", None, EncapsulatedPacket),
]
class CommandSpecificData(Packet):
"""Command Specific Data Field Default"""
pass
class ENIPSendUnitData(BaseSendPacket):
"""Send Unit Data Command Field"""
name = "ENIPSendUnitData"
class ENIPSendRRData(BaseSendPacket):
"""Send RR Data Command Field"""
name = "ENIPSendRRData"
class ENIPListInterfacesReplyItems(Packet):
"""List Interfaces Items Field"""
name = "ENIPListInterfacesReplyItems"
fields_desc = [
LEIntField("itemTypeCode", 0),
FieldLenField("itemLength", 0, length_of="itemData"),
StrLenField("itemData", "", length_from=lambda pkt: pkt.itemLength),
]
class ENIPListInterfacesReply(Packet):
"""List Interfaces Command Field"""
name = "ENIPListInterfacesReply"
fields_desc = [
FieldLenField("itemCount", 0, count_of="identityItems"),
PacketField("identityItems", 0, ENIPListInterfacesReplyItems),
]
class ENIPListIdentityReplyItems(Packet):
"""List Identity Items Field"""
name = "ENIPListIdentityReplyItems"
fields_desc = [
LEIntField("itemTypeCode", 0),
FieldLenField("itemLength", 0, length_of="itemData"),
StrLenField("itemData", "", length_from=lambda pkt: pkt.item_length),
]
class ENIPListIdentityReply(Packet):
"""List Identity Command Field"""
name = "ENIPListIdentityReply"
fields_desc = [
FieldLenField("itemCount", 0, count_of="identityItems"),
PacketField("identityItems", None, ENIPListIdentityReplyItems),
]
class ENIPListServicesReplyItems(Packet):
"""List Services Items Field"""
name = "ENIPListServicesReplyItems"
fields_desc = [
LEIntField("itemTypeCode", 0),
LEIntField("itemLength", 0),
ByteField("version", 1),
ByteField("flag", 0),
StrFixedLenField("serviceName", None, 16 * 4),
]
class ENIPListServicesReply(Packet):
"""List Services Command Field"""
name = "ENIPListServicesReply"
fields_desc = [
FieldLenField("itemCount", 0, count_of="identityItems"),
PacketField("targetItems", None, ENIPListServicesReplyItems),
]
class ENIPRegisterSession(CommandSpecificData):
"""Register Session Command Field"""
name = "ENIPRegisterSession"
fields_desc = [
LEShortField("protocolVersion", 1),
LEShortField("options", 0)
]
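# Editor's illustrative sketch (not part of the original contrib module):
# building a RegisterSession request with the classes defined here; field
# values other than the defaults are assumptions. Never called at import time.
def _example_register_session():
    # 24-byte ENIP encapsulation header followed by 4 bytes of command data
    # (protocolVersion=1, options=0).
    return ENIPTCP(commandId=0x65, length=4,
                   commandSpecificData=ENIPRegisterSession())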
class ENIPTCP(Packet):
"""Ethernet/IP packet over TCP"""
name = "ENIPTCP"
fields_desc = [
LEShortEnumField("commandId", None, _commandIdList),
LEShortField("length", 0),
XLEIntField("session", 0),
LEIntEnumField("status", None, _statusList),
LELongField("senderContext", 0),
LEIntField("options", 0),
MultipleTypeField(
[
# List Services Reply
(PacketField("commandSpecificData", ENIPListServicesReply,
ENIPListServicesReply),
lambda pkt: pkt.commandId == 0x4),
# List Identity Reply
(PacketField("commandSpecificData", ENIPListIdentityReply,
ENIPListIdentityReply),
lambda pkt: pkt.commandId == 0x63),
# List Interfaces Reply
(PacketField("commandSpecificData", ENIPListInterfacesReply,
ENIPListInterfacesReply),
lambda pkt: pkt.commandId == 0x64),
# Register Session
(PacketField("commandSpecificData", ENIPRegisterSession,
ENIPRegisterSession),
lambda pkt: pkt.commandId == 0x65),
# Send RR Data
(PacketField("commandSpecificData", ENIPSendRRData,
ENIPSendRRData),
| lambda pkt: pkt.commandId == 0x6f), | 625 | lcc_e | python | null | 7be7d972146bae6e379a2e75d85d7a8103d8d6cef2e618fd |
|
import os
import warnings
import numpy as np
from photon_tools.io import timetag_parse, pt2_parse, metadata
time_ch_dtype = np.dtype([('time', 'u8'), ('chan', 'u1')])
def verify_monotonic(times, filename):
""" Verify that timestamps are monotonically increasing """
if len(times) == 0: return
negatives = times[1:] <= times[:-1]
if np.count_nonzero(negatives) > 0:
indices = np.nonzero(negatives)
warnings.warn('%s: Found %d non-monotonic timestamps: photon indices %s' %
(filename, np.count_nonzero(negatives), indices))
def verify_continuity(times, filename, gap_factor=1000):
""" Search for improbably long gaps in the photon stream """
if len(times) == 0: return
tau = (times[-1] - times[0]) / len(times)
gaps = (times[1:] - times[:-1]) > gap_factor*tau
if np.count_nonzero(gaps) > 0:
msg = '%s: Found %d large gaps:\n' % (filename, np.count_nonzero(gaps))
gap_starts, = np.nonzero(gaps)
for s in gap_starts:
msg += ' starting at %10d, ending at %10d, lasting %10d' % \
(times[s], times[s+1], times[s+1] - times[s])
warnings.warn(msg)
class InvalidChannel(ValueError):
def __init__(self, requested_channel, valid_channels=[]):
self.requested_channel = requested_channel
self.valid_channels = valid_channels
def __str__(self):
return "Channel %s was requested but this file type only supports channels %s." \
% (self.requested_channel, self.valid_channels)
class TimestampFile(object):
"""
Represents a timestamp file.
A timestamp file is a file containing a sequential set of integer
photon arrival timestamps taken in one or more channels.
"""
def __init__(self, filename, jiffy, valid_channels=None):
if valid_channels is None:
valid_channels = self.__class__.valid_channels
self._valid_channels = valid_channels
self._fname = filename
self._jiffy = jiffy
@classmethod
def extensions(self):
"""
A list of supported file extensions
"""
return []
@property
def jiffy(self):
"""
        The timestamp resolution in seconds, or ``None`` if unknown.
:returns: float
"""
return self._jiffy
@property
def valid_channels(self):
"""
The names of the channels of the file.
Note that not all of these will have timestamps.
:returns: list
"""
return self._valid_channels
@property
def metadata(self):
"""
Metadata describing the data set.
:returns: dictionary mapping string metadata names to values
"""
return self._metadata
@property
def name(self):
""" File name of timestamp file """
return self._fname
def timestamps(self):
"""
Read the timestamp data for all channels of the file
:returns: An array of dtype :var:`time_ch_dtype` containing monotonically
increasing timestamps annotated with channel numbers.
"""
data = self._read_all()
verify_monotonic(data['time'], self._fname)
verify_continuity(data['time'], self._fname)
return data
def channel(self, channel):
"""
Read photon data for a channel of the file
:type channel: A valid channel name from :func:`valid_channels`.
:returns: An array of ``u8`` timestamps.
"""
self._validate_channel(channel)
data = self._read_channel(channel)
verify_monotonic(data, self._fname)
verify_continuity(data, self._fname)
return data
def _read_all(self):
""" Read the timestamps for all channels """
raise NotImplementedError()
def _read_channel(self, channel):
""" Actually read the data of a channel """
raise NotImplementedError()
def _validate_channel(self, channel):
""" A utility for implementations """
if channel not in self.valid_channels:
raise InvalidChannel(channel, self.valid_channels)
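# Editor's illustrative sketch (not part of the original module): typical use
# of a concrete TimestampFile subclass; the file name is hypothetical and the
# function is never called here.
def _example_read_channel_zero(path='run1.timetag'):
    f = TimetagFile(path)
    # Returns an array of u8 timestamps for channel 0, after the monotonicity
    # and continuity checks above have run.
    return f.channel(0)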
class PicoquantFile(TimestampFile):
""" A Picoquant PT2 and PT3 timestamp file """
valid_channels = [0,1,2,3]
extensions = ['pt2', 'pt3']
def __init__(self, fname):
# TODO: Read metadata
TimestampFile.__init__(self, fname, jiffy=4e-12)
def _read_all(self):
raise NotImplementedError()
def _read_channel(self, channel):
return pt2_parse.read_pt2(self._fname, channel)
class TimetagFile(TimestampFile):
""" A timestamp file from the Goldner lab FPGA timetagger """
extensions = ['timetag']
valid_channels = [0,1,2,3]
def __init__(self, fname):
TimestampFile.__init__(self, fname, jiffy = None)
self._metadata = metadata.get_metadata(fname)
if self.metadata is not None:
self._jiffy = 1. / self.metadata['clockrate']
if not os.path.isfile(fname):
raise IOError("File %s does not exist" % fname)
def _read_all(self):
res = timetag_parse.get_strobe_events(self._fname, 0xf)
res.dtype.names = time_ch_dtype.names
return res
def _read_channel(self, channel):
return timetag_parse.get_strobe_events(self._fname, 1<<channel)['t']
class RawFile(TimestampFile):
""" Raw unsigned 64-bit timestamps """
extensions = ['times']
valid_channels = [0]
def __init__(self, fname):
TimestampFile.__init__(self, fname, jiffy = None)
def _read_all(self):
timestamps = np.fromfile(self._fname, dtype='u8')
        return np.rec.fromarrays([timestamps,
                                  np.zeros_like(timestamps, dtype='u1')],
                                 dtype=time_ch_dtype)
def _read_channel(self, channel):
return np.fromfile(self._fname, dtype='u8')
class RawChFile(TimestampFile):
""" Raw unsigned 64-bit timestamps, followed by 8-bit channel number """
extensions = ['timech']
valid_channels = range(256)
def __init__(self, fname):
TimestampFile.__init__(self, fname, jiffy = None)
def _read_all(self):
return np.fromfile(self._fname, dtype=time_ch_dtype)
def _read_channel(self, channel):
d = self._read_all()
return d[d['chan'] == channel]['time']
readers = [
PicoquantFile,
TimetagFile,
RawFile,
RawChFile,
]
def supported_extensions():
"""
A dictionary mapping supported file extensions to their associated
:class:`TimestampFile` class.
"""
extensions = {}
for reader in readers:
for ext in reader.extensions:
extensions[ext] = reader
return extensions
def find_reader(fname):
| exts = supported_extensions() | 653 | lcc_e | python | null | 61d1532f1b1f27850e6b4a51d10754f0cd32ef74d49068e3 |
|
#!/usr/bin/env python3
"""Compute the auto and cross-correlation of delta fields for a list of IGM
absorption.
This module follow the procedure described in sections 4.3 of du Mas des
Bourboux et al. 2020 (In prep) to compute the distortion matrix
"""
import time
import argparse
import multiprocessing
from multiprocessing import Pool, Lock, cpu_count, Value
from functools import partial
import numpy as np
import fitsio
from picca import constants, cf, utils, io
from picca.utils import userprint
def calc_metal_dmat(abs_igm1, abs_igm2, healpixs):
"""Computes the metal distortion matrix.
To optimize the computation, first compute a list of neighbours for each of
the healpix. This is an auxiliar function to split the computational load
using several CPUs.
Args:
abs_igm1: str
Name of the absorption in picca.constants defining the
redshift of the forest pixels
abs_igm2: str
Name of the second absorption in picca.constants defining the
redshift of the forest pixels
healpixs: array of ints
List of healpix numbers
Returns:
The distortion matrix data
"""
cf.fill_neighs(healpixs)
np.random.seed(healpixs[0])
dmat_data = cf.compute_metal_dmat(healpixs,
abs_igm1=abs_igm1,
abs_igm2=abs_igm2)
return dmat_data
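# Editor's illustrative sketch (not part of the original script): a typical
# invocation of this module's CLI; paths, metal names and the rejection
# fraction are hypothetical values, the flags are the ones defined below.
#
#   picca_metal_dmat.py --out metal_dmat.fits.gz --in-dir deltas/ \
#       --abs-igm SiII(1260) SiIII(1207) --rej 0.99 --nproc 8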
def main():
# pylint: disable-msg=too-many-locals,too-many-branches,too-many-statements
"""Compute the auto and cross-correlation of delta fields for a list of IGM
absorption."""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description=('Compute the auto and cross-correlation of delta fields '
'for a list of IGM absorption.'))
parser.add_argument('--out',
type=str,
default=None,
required=True,
help='Output file name')
parser.add_argument('--in-dir',
type=str,
default=None,
required=True,
help='Directory to delta files')
parser.add_argument('--in-dir2',
type=str,
default=None,
required=False,
help='Directory to 2nd delta files')
parser.add_argument('--rp-min',
type=float,
default=0.,
required=False,
help='Min r-parallel [h^-1 Mpc]')
parser.add_argument('--rp-max',
type=float,
default=200.,
required=False,
help='Max r-parallel [h^-1 Mpc]')
parser.add_argument('--rt-max',
type=float,
default=200.,
required=False,
help='Max r-transverse [h^-1 Mpc]')
parser.add_argument('--np',
type=int,
default=50,
required=False,
help='Number of r-parallel bins')
parser.add_argument('--nt',
type=int,
default=50,
required=False,
help='Number of r-transverse bins')
parser.add_argument(
'--coef-binning-model',
type=int,
default=1,
required=False,
        help=('Coefficient multiplying np and nt to get finer binning for the '
'model'))
parser.add_argument(
'--z-cut-min',
type=float,
default=0.,
required=False,
help=('Use only pairs of forest x object with the mean of the last '
'absorber redshift and the object redshift larger than '
'z-cut-min'))
parser.add_argument(
'--z-cut-max',
type=float,
default=10.,
required=False,
help=('Use only pairs of forest x object with the mean of the last '
'absorber redshift and the object redshift smaller than '
'z-cut-max'))
parser.add_argument(
'--lambda-abs',
type=str,
default='LYA',
required=False,
help=('Name of the absorption in picca.constants defining the redshift '
'of the delta'))
parser.add_argument(
'--lambda-abs2',
type=str,
default=None,
required=False,
help=('Name of the absorption in picca.constants defining the redshift '
'of the 2nd delta'))
parser.add_argument(
'--abs-igm',
type=str,
default=[],
required=False,
nargs='*',
help=('List of names of metal absorption in picca.constants present in '
'forest'))
parser.add_argument(
'--abs-igm2',
type=str,
default=[],
required=False,
nargs='*',
help=('List of names of metal absorption in picca.constants present in '
'2nd forest'))
parser.add_argument('--z-ref',
type=float,
default=2.25,
required=False,
help='Reference redshift')
parser.add_argument(
'--z-evol',
type=float,
default=2.9,
required=False,
help='Exponent of the redshift evolution of the delta field')
parser.add_argument(
'--z-evol2',
type=float,
default=2.9,
required=False,
help='Exponent of the redshift evolution of the 2nd delta field')
parser.add_argument(
'--metal-alpha',
type=float,
default=1.,
required=False,
help='Exponent of the redshift evolution of the metal delta field')
parser.add_argument(
'--fid-Om',
type=float,
default=0.315,
required=False,
help='Omega_matter(z=0) of fiducial LambdaCDM cosmology')
parser.add_argument(
'--fid-Or',
type=float,
default=0.,
required=False,
help='Omega_radiation(z=0) of fiducial LambdaCDM cosmology')
parser.add_argument('--fid-Ok',
type=float,
default=0.,
required=False,
help='Omega_k(z=0) of fiducial LambdaCDM cosmology')
parser.add_argument(
'--fid-wl',
type=float,
default=-1.,
required=False,
help='Equation of state of dark energy of fiducial LambdaCDM cosmology')
parser.add_argument(
'--remove-same-half-plate-close-pairs',
action='store_true',
required=False,
help='Reject pairs in the first bin in r-parallel from same half plate')
parser.add_argument(
'--rej',
type=float,
default=1.,
required=False,
help=('Fraction of rejected forest-forest pairs: -1=no rejection, '
'1=all rejection'))
parser.add_argument('--nside',
type=int,
default=16,
required=False,
help='Healpix nside')
parser.add_argument('--nproc',
type=int,
default=None,
required=False,
help='Number of processors')
parser.add_argument('--nspec',
type=int,
default=None,
required=False,
help='Maximum number of spectra to read')
parser.add_argument(
'--unfold-cf',
action='store_true',
required=False,
help=('rp can be positive or negative depending on the relative '
'position between absorber1 and absorber2'))
args = parser.parse_args()
if args.nproc is None:
args.nproc = cpu_count() // 2
userprint("nproc", args.nproc)
# setup variables in module cf
cf.r_par_max = args.rp_max
cf.r_trans_max = args.rt_max
cf.r_par_min = args.rp_min
cf.z_cut_max = args.z_cut_max
cf.z_cut_min = args.z_cut_min
cf.num_bins_r_par = args.np * args.coef_binning_model
cf.num_bins_r_trans = args.nt * args.coef_binning_model
cf.num_model_bins_r_par = args.np * args.coef_binning_model
cf.num_model_bins_r_trans = args.nt * args.coef_binning_model
cf.nside = args.nside
cf.z_ref = args.z_ref
cf.alpha = args.z_evol
cf.reject = args.rej
cf.lambda_abs = constants.ABSORBER_IGM[args.lambda_abs]
cf.remove_same_half_plate_close_pairs = args.remove_same_half_plate_close_pairs
cf.alpha_abs = {}
cf.alpha_abs[args.lambda_abs] = cf.alpha
for metal in args.abs_igm:
cf.alpha_abs[metal] = args.metal_alpha
# load fiducial cosmology
cf.cosmo = constants.Cosmo(Om=args.fid_Om,
Or=args.fid_Or,
Ok=args.fid_Ok,
wl=args.fid_wl)
t0 = time.time()
### Read data 1
data, num_data, z_min, z_max = io.read_deltas(args.in_dir,
cf.nside,
cf.lambda_abs,
cf.alpha,
cf.z_ref,
cf.cosmo,
max_num_spec=args.nspec)
del z_max
cf.data = data
cf.num_data = num_data
cf.ang_max = utils.compute_ang_max(cf.cosmo, cf.r_trans_max, z_min)
userprint("")
userprint("done, npix = {}".format(len(data)))
### Read data 2
if args.in_dir2 or args.lambda_abs2:
if args.lambda_abs2 or args.unfold_cf:
cf.x_correlation = True
cf.alpha2 = args.z_evol2
if args.in_dir2 is None:
args.in_dir2 = args.in_dir
if args.lambda_abs2:
cf.lambda_abs2 = constants.ABSORBER_IGM[args.lambda_abs2]
else:
cf.lambda_abs2 = cf.lambda_abs
cf.alpha_abs[args.lambda_abs2] = cf.alpha2
for m in args.abs_igm2:
cf.alpha_abs[m] = args.metal_alpha
data2, num_data2, z_min2, z_max2 = io.read_deltas(
args.in_dir2,
cf.nside,
cf.lambda_abs2,
cf.alpha2,
cf.z_ref,
cf.cosmo,
max_num_spec=args.nspec)
del z_max2
cf.data2 = data2
cf.num_data2 = num_data2
cf.ang_max = utils.compute_ang_max(cf.cosmo, cf.r_trans_max, z_min,
z_min2)
userprint("")
userprint("done, npix = {}".format(len(data2)))
t1 = time.time()
userprint(f'picca_metal_dmat.py - Time reading data: {(t1-t0)/60:.3f} minutes')
cf.counter = Value('i', 0)
cf.lock = Lock()
cpu_data = {}
for index, healpix in enumerate(sorted(list(data.keys()))):
num_processor = index % args.nproc
if not num_processor in cpu_data:
cpu_data[num_processor] = []
cpu_data[num_processor].append(healpix)
    # initialize arrays to store the results for the different metal absorptions
dmat_all = []
weights_dmat_all = []
r_par_all = []
r_trans_all = []
z_all = []
names = []
num_pairs_all = []
num_pairs_used_all = []
abs_igm = [args.lambda_abs] + args.abs_igm
userprint("abs_igm = {}".format(abs_igm))
if args.lambda_abs2 is None:
args.lambda_abs2 = args.lambda_abs
args.abs_igm2 = args.abs_igm
abs_igm_2 = [args.lambda_abs2] + args.abs_igm2
if cf.x_correlation:
userprint("abs_igm2 = {}".format(abs_igm_2))
# loop over metals
for index1, abs_igm1 in enumerate(abs_igm):
index0 = index1
if args.lambda_abs != args.lambda_abs2:
index0 = 0
for index2, abs_igm2 in enumerate(abs_igm_2[index0:]):
if index1 == 0 and index2 == 0:
continue
cf.counter.value = 0
calc_metal_dmat_wrapper = partial(calc_metal_dmat, abs_igm1,
abs_igm2)
userprint("")
# compute the distortion matrix
if args.nproc > 1:
context = multiprocessing.get_context('fork')
pool = context.Pool(processes=args.nproc)
dmat_data = pool.map(calc_metal_dmat_wrapper,
sorted(cpu_data.values()))
pool.close()
elif args.nproc == 1:
dmat_data = map(calc_metal_dmat_wrapper,
sorted(cpu_data.values()))
dmat_data = list(dmat_data)
# merge the results from different CPUs
dmat_data = np.array(dmat_data)
weights_dmat = dmat_data[:, 0].sum(axis=0)
dmat = dmat_data[:, 1].sum(axis=0)
r_par = dmat_data[:, 2].sum(axis=0)
r_trans = dmat_data[:, 3].sum(axis=0)
z = dmat_data[:, 4].sum(axis=0)
weights = dmat_data[:, 5].sum(axis=0)
num_pairs = dmat_data[:, 6].sum(axis=0)
num_pairs_used = dmat_data[:, 7].sum(axis=0)
# normalize_values
w = weights > 0
r_par[w] /= weights[w]
r_trans[w] /= weights[w]
z[w] /= weights[w]
w = weights_dmat > 0
dmat[w, :] /= weights_dmat[w, None]
            # add these results to the lists for the different metal absorptions
dmat_all.append(dmat)
weights_dmat_all.append(weights_dmat)
r_par_all.append(r_par)
r_trans_all.append(r_trans)
z_all.append(z)
names.append(abs_igm1 + "_" + abs_igm2)
num_pairs_all.append(num_pairs)
num_pairs_used_all.append(num_pairs_used)
t2 = time.time()
userprint(f'picca_metal_dmat.py - Time computing all metal matrices : {(t2-t1)/60:.3f} minutes')
# save the results
results = fitsio.FITS(args.out, 'rw', clobber=True)
header = [
{
'name': 'RPMIN',
'value': cf.r_par_min,
'comment': 'Minimum r-parallel [h^-1 Mpc]'
},
{
'name': 'RPMAX',
'value': cf.r_par_max,
'comment': 'Maximum r-parallel [h^-1 Mpc]'
},
{
'name': 'RTMAX',
'value': cf.r_trans_max,
'comment': 'Maximum r-transverse [h^-1 Mpc]'
},
{
'name': 'NP',
'value': cf.num_bins_r_par,
'comment': 'Number of bins in r-parallel'
},
{
'name': 'NT',
'value': cf.num_bins_r_trans,
'comment': ' Number of bins in r-transverse'
},
{
'name': 'COEFMOD',
'value': args.coef_binning_model,
'comment': 'Coefficient for model binning'
},
{
'name': 'ZCUTMIN',
'value': cf.z_cut_min,
'comment': 'Minimum redshift of pairs'
},
{
'name': 'ZCUTMAX',
'value': cf.z_cut_max,
'comment': 'Maximum redshift of pairs'
},
{
'name': 'REJ',
'value': cf.reject,
'comment': 'Rejection factor'
},
{
'name': 'ALPHAMET',
'value': args.metal_alpha,
'comment': 'Evolution of metal bias'
}, {
'name': 'OMEGAM',
'value': args.fid_Om,
'comment': 'Omega_matter(z=0) of fiducial LambdaCDM cosmology'
}, {
'name': 'OMEGAR',
'value': args.fid_Or,
'comment': 'Omega_radiation(z=0) of fiducial LambdaCDM cosmology'
}, {
'name': 'OMEGAK',
'value': args.fid_Ok,
'comment': 'Omega_k(z=0) of fiducial LambdaCDM cosmology'
}, {
'name': 'WL',
'value': args.fid_wl,
'comment': 'Equation of state of dark energy of fiducial LambdaCDM cosmology'
}
]
len_names = np.array([len(name) for name in names]).max()
names = np.array(names, dtype='S' + str(len_names))
results.write(
[
np.array(num_pairs_all),
np.array(num_pairs_used_all),
np.array(names)
],
names=['NPALL', 'NPUSED', 'ABS_IGM'],
header=header,
comment=['Number of pairs', 'Number of used pairs', 'Absorption name'],
extname='ATTRI')
names = names.astype(str)
out_list = []
out_names = []
out_comment = []
out_units = []
for index, name in enumerate(names):
out_names += ['RP_' + name]
out_list += [r_par_all[index]]
out_comment += ['R-parallel']
out_units += ['h^-1 Mpc']
out_names += ['RT_' + name]
out_list += [r_trans_all[index]]
out_comment += ['R-transverse']
out_units += ['h^-1 Mpc']
out_names += ['Z_' + name]
out_list += [z_all[index]]
out_comment += ['Redshift']
out_units += ['']
out_names += ['DM_' + name]
out_list += [dmat_all[index]]
out_comment += ['Distortion matrix']
out_units += ['']
out_names += ['WDM_' + name]
| out_list += [weights_dmat_all[index]] | 1,374 | lcc_e | python | null | 679f7e9fbff5d78f3c5e8e268644626790d4fa5fa3997cae |
|
from enum import Enum
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Optional, TextIO, Union
import re
from .config import LIST_TYPE_TAGS, TAG_KEY_MAPPING, WOK_TAG_KEY_MAPPING, WOK_LIST_TYPE_TAGS
__all__ = ["load", "loads"]
class RisImplementation(Enum):
BASE = "base"
WOK = "wok"
class NextLine(Exception):
pass
class Base:
START_TAG: str = None
END_TAG: str = "ER"
IGNORE: List[str] = []
PATTERN: str = None
def __init__(self, lines, mapping, strict=True):
self.lines = lines
self.pattern = re.compile(self.PATTERN)
self._mapping = mapping
self.strict = strict
@property
def mapping(self):
if self._mapping is not None:
return self._mapping
else:
return self.default_mapping
def parse(self):
self.in_ref = False
self.current = {}
self.last_tag = None
for line_number, line in enumerate(self.lines):
if not line.strip():
continue
if self.is_tag(line):
try:
yield self.parse_tag(line, line_number)
self.current = {}
self.in_ref = False
self.last_tag = None
except NextLine:
continue
else:
try:
yield self.parse_other(line, line_number)
except NextLine:
continue
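    # Control flow note: parse() treats the NextLine exception raised by the
    # helpers below as "nothing to yield for this line, keep reading", while a
    # normal return from parse_tag() (on the END_TAG) yields a finished record.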
def parse_tag(self, line, line_number):
tag = self.get_tag(line)
if tag in self.IGNORE:
raise NextLine
if tag == self.END_TAG:
return self.current
if tag == self.START_TAG:
# New entry
if self.in_ref:
raise IOError(f"Missing end of record tag in line {line_number}:\n {line}")
self.add_tag(tag, line)
self.in_ref = True
raise NextLine
if not self.in_ref:
raise IOError(f"Invalid start tag in line {line_number}:\n {line}")
if tag in self.mapping:
self.add_tag(tag, line)
raise NextLine
else:
self.add_unknown_tag(tag, line)
raise NextLine
raise NextLine
def parse_other(self, line, line_number):
if not self.strict:
raise NextLine
if self.in_ref:
# Active reference
if self.last_tag is None:
raise IOError(f"Expected tag in line {line_number}:\n {line}")
# Active tag
self.add_tag(self.last_tag, line, all_line=True)
raise NextLine
if self.is_counter(line):
raise NextLine
raise IOError(f"Expected start tag in line {line_number}:\n {line}")
def add_single_value(self, name, value, is_multi=False):
if not is_multi:
ignore_this_if_has_one = value
self.current.setdefault(name, ignore_this_if_has_one)
return
value_must_exist_or_is_bug = self.current[name]
self.current[name] = " ".join((value_must_exist_or_is_bug, value))
def add_list_value(self, name, value):
try:
self.current[name].append(value)
except KeyError:
self.current[name] = [value]
def add_tag(self, tag, line, all_line=False):
self.last_tag = tag
name = self.mapping[tag]
if all_line:
new_value = line.strip()
else:
new_value = self.get_content(line)
if tag not in LIST_TYPE_TAGS:
self.add_single_value(name, new_value, is_multi=all_line)
return
self.add_list_value(name, new_value)
def add_unknown_tag(self, tag, line):
name = self.mapping["UK"]
tag = self.get_tag(line)
value = self.get_content(line)
# check if unknown_tag dict exists
if name not in self.current:
self.current[name] = defaultdict(list)
self.current[name][tag].append(value)
def get_tag(self, line):
return line[0:2]
def is_tag(self, line):
return bool(self.pattern.match(line))
def get_content(self, line):
raise NotImplementedError
class Wok(Base):
START_TAG = "PT"
IGNORE = ["FN", "VR", "EF"]
PATTERN = r"^[A-Z][A-Z0-9] |^ER\s?|^EF\s?"
LIST_TYPE_TAGS = WOK_LIST_TYPE_TAGS
default_mapping = WOK_TAG_KEY_MAPPING
def get_content(self, line):
return line[2:].strip()
def is_counter(self, line):
return True
class Ris(Base):
START_TAG = "TY"
    PATTERN = r"^[A-Z][A-Z0-9]  - |^ER  -\s*$"
default_mapping = TAG_KEY_MAPPING
counter_re = re.compile("^[0-9]+.")
def get_content(self, line):
return line[6:].strip()
def is_counter(self, line):
none_or_match = self.counter_re.match(line)
return bool(none_or_match)
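# Illustrative sketch (not part of the original module): parsing a minimal RIS
# record with the helpers below. The dictionary keys shown are assumptions that
# depend on the tag mapping in use (TAG_KEY_MAPPING by default).
#
#     record = "TY  - JOUR\nTI  - An example title\nER  - \n"
#     entries = loads(record)
#     # entries -> [{'type_of_reference': 'JOUR', 'title': 'An example title'}]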
def load(
file: Union[TextIO, Path],
mapping: Optional[Dict] = None,
implementation: RisImplementation = RisImplementation.BASE,
strict: bool = True,
) -> List[Dict]:
"""Load a RIS file and return a list of entries.
Entries are codified as dictionaries whose keys are the
different tags. For single line and singly occurring tags,
the content is codified as a string. In the case of multiline
or multiple key occurrences, the content is returned as a list
of strings.
Args:
file (Union[TextIO, Path]): File handle to read ris formatted data.
mapping (Dict, optional): a tag mapping dictionary.
implementation (RisImplementation): RIS implementation; base by default.
strict (bool): Boolean to allow non-tag data between records to be ignored.
Returns:
list: Returns list of RIS entries.
"""
text = file.read_text() if isinstance(file, Path) else file.read()
return list(loads(text, mapping, implementation, strict))
def loads(
obj: str,
mapping: Optional[Dict] = None,
implementation: RisImplementation = RisImplementation.BASE,
strict: bool = True,
) -> List[Dict]:
"""Load a RIS file and return a list of entries.
Entries are codified as dictionaries whose keys are the
different tags. For single line and singly occurring tags,
the content is codified as a string. In the case of multiline
or multiple key occurrences, the content is returned as a list
of strings.
Args:
obj (str): A string version of an RIS file.
mapping (Dict, optional): a tag mapping dictionary.
implementation (RisImplementation): RIS implementation; base by default.
strict (bool): Boolean to allow non-tag data between records to be ignored.
Returns:
list: Returns list of RIS entries.
"""
# remove BOM if present
| obj = obj.lstrip("\ufeff") | 676 | lcc_e | python | null | 558f671494850fdb1783371aa9016d6202151b1b538c13a5 |
|
import os
import random
import re
import socket
import subprocess
import time
import zipfile
import tempfile
import base64
import shutil
import sys
from io import BytesIO
import pytest
from contextlib import contextmanager
from multiprocessing import Process
from urllib.request import urlopen, Request
from werkzeug.datastructures import Headers
from werkzeug.exceptions import RequestedRangeNotSatisfiable
from onionshare_cli.common import Common
from onionshare_cli.web import Web
from onionshare_cli.web.share_mode import parse_range_header
from onionshare_cli.settings import Settings
from onionshare_cli.mode_settings import ModeSettings
import onionshare_cli.web.receive_mode
# Stub requests.post, for receive mode webhook tests
webhook_url = None
webhook_data = None
def requests_post_stub(url, data, timeout, proxies):
global webhook_url, webhook_data
webhook_url = url
webhook_data = data
onionshare_cli.web.receive_mode.requests.post = requests_post_stub
DEFAULT_ZW_FILENAME_REGEX = re.compile(r"^onionshare_[a-z2-7]{6}.zip$")
RANDOM_STR_REGEX = re.compile(r"^[a-z2-7]+$")
def web_obj(temp_dir, common_obj, mode, num_files=0):
"""Creates a Web object, in either share mode or receive mode, ready for testing"""
common_obj.settings = Settings(common_obj)
mode_settings = ModeSettings(common_obj)
web = Web(common_obj, False, mode_settings, mode)
web.generate_password()
web.running = True
    web.cleanup_filenames = []
web.app.testing = True
# Share mode
if mode == "share":
# Add files
files = []
for _ in range(num_files):
with tempfile.NamedTemporaryFile(delete=False, dir=temp_dir) as tmp_file:
tmp_file.write(b"*" * 1024)
files.append(tmp_file.name)
web.share_mode.set_file_info(files)
# Receive mode
else:
pass
return web
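# The Web object built above is driven through Flask's test client
# (web.app.test_client()) in the tests below; in share mode it is pre-loaded
# with `num_files` temporary 1 KiB files, while receive mode starts out empty.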
class TestWeb:
def test_share_mode(self, temp_dir, common_obj):
web = web_obj(temp_dir, common_obj, "share", 3)
assert web.mode == "share"
with web.app.test_client() as c:
# Load / without auth
res = c.get("/")
res.get_data()
assert res.status_code == 401
# Load / with invalid auth
res = c.get("/", headers=self._make_auth_headers("invalid"))
res.get_data()
assert res.status_code == 401
# Load / with valid auth
res = c.get("/", headers=self._make_auth_headers(web.password))
res.get_data()
assert res.status_code == 200
# Download
res = c.get("/download", headers=self._make_auth_headers(web.password))
res.get_data()
assert res.status_code == 200
assert (
res.mimetype == "application/zip"
or res.mimetype == "application/x-zip-compressed"
)
def test_share_mode_autostop_sharing_on(self, temp_dir, common_obj, temp_file_1024):
web = web_obj(temp_dir, common_obj, "share", 3)
web.settings.set("share", "autostop_sharing", True)
assert web.running is True
with web.app.test_client() as c:
# Download the first time
res = c.get("/download", headers=self._make_auth_headers(web.password))
res.get_data()
assert res.status_code == 200
assert (
res.mimetype == "application/zip"
or res.mimetype == "application/x-zip-compressed"
)
assert web.running is False
def test_share_mode_autostop_sharing_off(
self, temp_dir, common_obj, temp_file_1024
):
web = web_obj(temp_dir, common_obj, "share", 3)
web.settings.set("share", "autostop_sharing", False)
assert web.running is True
with web.app.test_client() as c:
# Download the first time
res = c.get("/download", headers=self._make_auth_headers(web.password))
res.get_data()
assert res.status_code == 200
assert (
res.mimetype == "application/zip"
or res.mimetype == "application/x-zip-compressed"
)
assert web.running is True
def test_receive_mode(self, temp_dir, common_obj):
web = web_obj(temp_dir, common_obj, "receive")
assert web.mode == "receive"
with web.app.test_client() as c:
# Load / without auth
res = c.get("/")
res.get_data()
assert res.status_code == 401
# Load / with invalid auth
res = c.get("/", headers=self._make_auth_headers("invalid"))
res.get_data()
assert res.status_code == 401
# Load / with valid auth
res = c.get("/", headers=self._make_auth_headers(web.password))
res.get_data()
assert res.status_code == 200
def test_receive_mode_webhook(self, temp_dir, common_obj):
global webhook_url, webhook_data
webhook_url = None
webhook_data = None
web = web_obj(temp_dir, common_obj, "receive")
assert web.mode == "receive"
web.settings.set("receive", "webhook_url", "http://127.0.0.1:1337/example")
web.proxies = None
assert (
web.settings.get("receive", "webhook_url")
== "http://127.0.0.1:1337/example"
)
with web.app.test_client() as c:
res = c.get("/", headers=self._make_auth_headers(web.password))
res.get_data()
assert res.status_code == 200
res = c.post(
"/upload-ajax",
buffered=True,
content_type="multipart/form-data",
data={"file[]": (BytesIO(b"THIS IS A TEST FILE"), "new_york.jpg")},
headers=self._make_auth_headers(web.password),
)
res.get_data()
assert res.status_code == 200
assert webhook_url == "http://127.0.0.1:1337/example"
assert webhook_data == "1 file submitted to OnionShare"
def test_receive_mode_message_no_files(self, temp_dir, common_obj):
web = web_obj(temp_dir, common_obj, "receive")
data_dir = os.path.join(temp_dir, "OnionShare")
os.makedirs(data_dir, exist_ok=True)
web.settings.set("receive", "data_dir", data_dir)
with web.app.test_client() as c:
res = c.post(
"/upload-ajax",
buffered=True,
content_type="multipart/form-data",
data={"text": "you know just sending an anonymous message"},
headers=self._make_auth_headers(web.password),
)
content = res.get_data()
assert res.status_code == 200
assert b"Message submitted" in content
# ~/OnionShare should have a folder for the date
filenames = os.listdir(data_dir)
assert len(filenames) == 1
data_dir_date = os.path.join(data_dir, filenames[0])
# The date folder should have a single message txt file, no folders
filenames = os.listdir(data_dir_date)
assert len(filenames) == 1
assert filenames[0].endswith("-message.txt")
shutil.rmtree(data_dir)
def test_receive_mode_message_and_files(self, temp_dir, common_obj):
web = web_obj(temp_dir, common_obj, "receive")
data_dir = os.path.join(temp_dir, "OnionShare")
os.makedirs(data_dir, exist_ok=True)
web.settings.set("receive", "data_dir", data_dir)
with web.app.test_client() as c:
res = c.post(
"/upload-ajax",
buffered=True,
content_type="multipart/form-data",
data={
"file[]": (BytesIO(b"THIS IS A TEST FILE"), "new_york.jpg"),
"text": "you know just sending an anonymous message",
},
headers=self._make_auth_headers(web.password),
)
content = res.get_data()
assert res.status_code == 200
assert b"Message submitted, uploaded new_york.jpg" in content
# Date folder should have a time folder with new_york.jpg, and a text message file
data_dir_date = os.path.join(data_dir, os.listdir(data_dir)[0])
filenames = os.listdir(data_dir_date)
assert len(filenames) == 2
time_str = filenames[0][0:6]
assert time_str in filenames
assert f"{time_str}-message.txt" in filenames
data_dir_time = os.path.join(data_dir_date, time_str)
assert os.path.isdir(data_dir_time)
assert os.path.exists(os.path.join(data_dir_time, "new_york.jpg"))
shutil.rmtree(data_dir)
def test_receive_mode_files_no_message(self, temp_dir, common_obj):
web = web_obj(temp_dir, common_obj, "receive")
data_dir = os.path.join(temp_dir, "OnionShare")
os.makedirs(data_dir, exist_ok=True)
web.settings.set("receive", "data_dir", data_dir)
with web.app.test_client() as c:
res = c.post(
"/upload-ajax",
buffered=True,
content_type="multipart/form-data",
data={"file[]": (BytesIO(b"THIS IS A TEST FILE"), "new_york.jpg")},
headers=self._make_auth_headers(web.password),
)
content = res.get_data()
assert res.status_code == 200
assert b"Uploaded new_york.jpg" in content
# Date folder should have just a time folder with new_york.jpg
data_dir_date = os.path.join(data_dir, os.listdir(data_dir)[0])
filenames = os.listdir(data_dir_date)
assert len(filenames) == 1
time_str = filenames[0][0:6]
assert time_str in filenames
assert f"{time_str}-message.txt" not in filenames
data_dir_time = os.path.join(data_dir_date, time_str)
assert os.path.isdir(data_dir_time)
assert os.path.exists(os.path.join(data_dir_time, "new_york.jpg"))
shutil.rmtree(data_dir)
def test_receive_mode_no_message_no_files(self, temp_dir, common_obj):
web = web_obj(temp_dir, common_obj, "receive")
data_dir = os.path.join(temp_dir, "OnionShare")
os.makedirs(data_dir, exist_ok=True)
web.settings.set("receive", "data_dir", data_dir)
with web.app.test_client() as c:
res = c.post(
"/upload-ajax",
buffered=True,
content_type="multipart/form-data",
data={},
headers=self._make_auth_headers(web.password),
)
content = res.get_data()
assert res.status_code == 200
assert b"Nothing submitted" in content
# Date folder should be empty
data_dir_date = os.path.join(data_dir, os.listdir(data_dir)[0])
filenames = os.listdir(data_dir_date)
assert len(filenames) == 0
shutil.rmtree(data_dir)
def test_public_mode_on(self, temp_dir, common_obj):
web = web_obj(temp_dir, common_obj, "receive")
web.settings.set("general", "public", True)
with web.app.test_client() as c:
# Loading / should work without auth
res = c.get("/")
res.get_data()
assert res.status_code == 200
def test_public_mode_off(self, temp_dir, common_obj):
web = web_obj(temp_dir, common_obj, "receive")
web.settings.set("general", "public", False)
with web.app.test_client() as c:
# Load / without auth
| res = c.get("/") | 919 | lcc_e | python | null | 868c857f4c55cf95b3e5964786bc9bfb558a134958ed9614 |
|
from __future__ import with_statement, print_function
# Script for building the _ssl and _hashlib modules for Windows.
# Uses Perl to setup the OpenSSL environment correctly
# and build OpenSSL, then invokes a simple nmake session
# for the actual _ssl.pyd and _hashlib.pyd DLLs.
# THEORETICALLY, you can:
# * Unpack the latest SSL release one level above your main Python source
# directory. It is likely you will already find the zlib library and
# any other external packages there.
# * Install ActivePerl and ensure it is somewhere on your path.
# * Run this script from the PCBuild directory.
#
# it should configure and build SSL, then build the _ssl and _hashlib
# Python extensions without intervention.
# Modified by Christian Heimes
# Now this script supports pre-generated makefiles and assembly files.
# Developers don't need an installation of Perl anymore to build Python. A svn
# checkout from our svn repository is enough.
#
# In Order to create the files in the case of an update you still need Perl.
# Run build_ssl in this order:
# python.exe build_ssl.py Release x64
# python.exe build_ssl.py Release Win32
import os, sys, re, shutil
# Find all "foo.exe" files on the PATH.
def find_all_on_path(filename, extras = None):
entries = os.environ["PATH"].split(os.pathsep)
ret = []
for p in entries:
fname = os.path.abspath(os.path.join(p, filename))
if os.path.isfile(fname) and fname not in ret:
ret.append(fname)
if extras:
for p in extras:
fname = os.path.abspath(os.path.join(p, filename))
if os.path.isfile(fname) and fname not in ret:
ret.append(fname)
return ret
# Find a suitable Perl installation for OpenSSL.
# cygwin perl does *not* work. ActivePerl does.
# Being a Perl dummy, the simplest way I can check is if the "Win32" package
# is available.
def find_working_perl(perls):
for perl in perls:
fh = os.popen('"%s" -e "use Win32;"' % perl)
fh.read()
rc = fh.close()
if rc:
continue
return perl
print("Can not find a suitable PERL:")
if perls:
print(" the following perl interpreters were found:")
for p in perls:
print(" ", p)
print(" None of these versions appear suitable for building OpenSSL")
else:
print(" NO perl interpreters were found on this machine at all!")
print(" Please install ActivePerl and ensure it appears on your path")
return None
# Locate the best SSL directory given a few roots to look into.
def find_best_ssl_dir(sources):
candidates = []
for s in sources:
try:
# note: do not abspath s; the build will fail if any
# higher up directory name has spaces in it.
fnames = os.listdir(s)
except os.error:
fnames = []
for fname in fnames:
fqn = os.path.join(s, fname)
if os.path.isdir(fqn) and fname.startswith("openssl-"):
candidates.append(fqn)
# Now we have all the candidates, locate the best.
best_parts = []
best_name = None
for c in candidates:
parts = re.split("[.-]", os.path.basename(c))[1:]
# eg - openssl-0.9.7-beta1 - ignore all "beta" or any other qualifiers
if len(parts) >= 4:
continue
if parts > best_parts:
best_parts = parts
best_name = c
if best_name is not None:
print("Found an SSL directory at '%s'" % (best_name,))
else:
print("Could not find an SSL directory in '%s'" % (sources,))
sys.stdout.flush()
return best_name
def create_makefile64(makefile, m32):
"""Create and fix makefile for 64bit
Replace 32 with 64bit directories
"""
if not os.path.isfile(m32):
return
with open(m32) as fin:
with open(makefile, 'w') as fout:
for line in fin:
line = line.replace("=tmp32", "=tmp64")
line = line.replace("=out32", "=out64")
line = line.replace("=inc32", "=inc64")
# force 64 bit machine
line = line.replace("MKLIB=lib", "MKLIB=lib /MACHINE:X64")
line = line.replace("LFLAGS=", "LFLAGS=/MACHINE:X64 ")
# don't link against the lib on 64bit systems
line = line.replace("bufferoverflowu.lib", "")
fout.write(line)
os.unlink(m32)
def fix_makefile(makefile):
"""Fix some stuff in all makefiles
"""
if not os.path.isfile(makefile):
return
# 2.4 compatibility
fin = open(makefile)
if 1: # with open(makefile) as fin:
lines = fin.readlines()
fin.close()
fout = open(makefile, 'w')
if 1: # with open(makefile, 'w') as fout:
for line in lines:
if line.startswith("PERL="):
continue
if line.startswith("CP="):
line = "CP=copy\n"
if line.startswith("MKDIR="):
line = "MKDIR=mkdir\n"
if line.startswith("CFLAG="):
line = line.strip()
for algo in ("RC5", "MDC2", "IDEA"):
noalgo = " -DOPENSSL_NO_%s" % algo
if noalgo not in line:
line = line + noalgo
line = line + '\n'
fout.write(line)
fout.close()
def run_configure(configure, do_script):
print("perl Configure "+configure)
os.system("perl Configure "+configure)
print(do_script)
os.system(do_script)
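# For example, a 64-bit release build ends up running roughly:
#     perl Configure VC-WIN64A
#     ms\do_win64a
# while a 32-bit build uses "VC-WIN32" and "ms\do_nasm" (see main() below).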
def main():
build_all = "-a" in sys.argv
if sys.argv[1] == "Release":
debug = False
elif sys.argv[1] == "Debug":
debug = True
else:
raise ValueError(str(sys.argv))
if sys.argv[2] == "Win32":
arch = "x86"
configure = "VC-WIN32"
do_script = "ms\\do_nasm"
makefile="ms\\nt.mak"
m32 = makefile
elif sys.argv[2] == "x64":
arch="amd64"
configure = "VC-WIN64A"
do_script = "ms\\do_win64a"
makefile = "ms\\nt64.mak"
m32 = makefile.replace('64', '')
#os.environ["VSEXTCOMP_USECL"] = "MS_OPTERON"
else:
raise ValueError(str(sys.argv))
make_flags = ""
if build_all:
make_flags = "-a"
# perl should be on the path, but we also look in "\perl" and "c:\\perl"
# as "well known" locations
perls = find_all_on_path("perl.exe", ["\\perl\\bin", "C:\\perl\\bin"])
perl = find_working_perl(perls)
if perl:
print("Found a working perl at '%s'" % (perl,))
else:
print("No Perl installation was found. Existing Makefiles are used.")
sys.stdout.flush()
# Look for SSL 2 levels up from pcbuild - ie, same place zlib etc all live.
ssl_dir = find_best_ssl_dir(("..\\..",))
if ssl_dir is None:
sys.exit(1)
old_cd = os.getcwd()
try:
os.chdir(ssl_dir)
        # rebuild the makefile when we roll over from the 32-bit to the 64-bit build
if arch == "amd64" and os.path.isfile(m32) and not os.path.isfile(makefile):
os.unlink(m32)
# If the ssl makefiles do not exist, we invoke Perl to generate them.
# Due to a bug in this script, the makefile sometimes ended up empty
# Force a regeneration if it is.
if not os.path.isfile(makefile) or os.path.getsize(makefile)==0:
if perl is None:
print("Perl is required to build the makefiles!")
sys.exit(1)
print("Creating the makefiles...")
sys.stdout.flush()
# Put our working Perl at the front of our path
os.environ["PATH"] = os.path.dirname(perl) + \
os.pathsep + \
os.environ["PATH"]
run_configure(configure, do_script)
if debug:
print("OpenSSL debug builds aren't supported.")
#if arch=="x86" and debug:
# # the do_masm script in openssl doesn't generate a debug
# # build makefile so we generate it here:
# os.system("perl util\mk1mf.pl debug "+configure+" >"+makefile)
| if arch == "amd64": | 982 | lcc_e | python | null | c737e0826af057af7bdde835b7d29b5b6e193c58c82a25f4 |
|
from io import BytesIO
from translate.convert import prop2po, test_convert
from translate.storage import po, properties
class TestProp2PO:
@staticmethod
def prop2po(propsource, proptemplate=None, personality="java"):
"""helper that converts .properties source to po source without requiring files"""
inputfile = BytesIO(propsource.encode())
inputprop = properties.propfile(inputfile, personality=personality)
convertor = prop2po.prop2po(personality=personality)
if proptemplate:
templatefile = BytesIO(proptemplate.encode())
templateprop = properties.propfile(templatefile)
outputpo = convertor.mergestore(templateprop, inputprop)
else:
outputpo = convertor.convertstore(inputprop)
return outputpo
@staticmethod
def convertprop(propsource):
"""call the convertprop, return the outputfile"""
inputfile = BytesIO(propsource.encode())
outputfile = BytesIO()
templatefile = None
assert prop2po.convertprop(inputfile, outputfile, templatefile)
return outputfile.getvalue()
@staticmethod
def singleelement(pofile):
"""checks that the pofile contains a single non-header element, and returns it"""
assert len(pofile.units) == 2
assert pofile.units[0].isheader()
print(pofile)
return pofile.units[1]
@staticmethod
def countelements(pofile):
"""counts the number of non-header entries"""
assert pofile.units[0].isheader()
print(pofile)
return len(pofile.units) - 1
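    # Convention used by the helpers above: a converted PO file always starts
    # with a header unit at index 0, so the "real" units live in pofile.units[1:].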
def test_simpleentry(self):
"""checks that a simple properties entry converts properly to a po entry"""
propsource = "SAVEENTRY=Save file\n"
pofile = self.prop2po(propsource)
pounit = self.singleelement(pofile)
assert pounit.source == "Save file"
assert pounit.target == ""
def test_convertprop(self):
"""checks that the convertprop function is working"""
propsource = "SAVEENTRY=Save file\n"
posource = self.convertprop(propsource)
pofile = po.pofile(BytesIO(posource))
pounit = self.singleelement(pofile)
assert pounit.source == "Save file"
assert pounit.target == ""
def test_no_value_entry(self):
"""checks that a properties entry without value is converted"""
propsource = "KEY = \n"
pofile = self.prop2po(propsource)
pounit = self.singleelement(pofile)
assert pounit.getcontext() == "KEY"
assert pounit.source == ""
assert pounit.target == ""
def test_no_separator_entry(self):
"""checks that a properties entry without separator is converted"""
propsource = "KEY\n"
pofile = self.prop2po(propsource)
pounit = self.singleelement(pofile)
assert pounit.getcontext() == "KEY"
assert pounit.source == ""
assert pounit.target == ""
def test_tab_at_end_of_string(self):
"""check that we preserve tabs at the end of a string"""
propsource = r"TAB_AT_END=This setence has a tab at the end.\t"
pofile = self.prop2po(propsource)
pounit = self.singleelement(pofile)
assert pounit.source == "This setence has a tab at the end.\t"
propsource = (
r"SPACE_THEN_TAB_AT_END=This setence has a space then tab at the end. \t"
)
pofile = self.prop2po(propsource)
pounit = self.singleelement(pofile)
assert pounit.source == "This setence has a space then tab at the end. \t"
propsource = r"SPACE_AT_END=This setence will keep its 4 spaces at the end. "
pofile = self.prop2po(propsource)
pounit = self.singleelement(pofile)
assert pounit.source == "This setence will keep its 4 spaces at the end. "
propsource = (
r"SPACE_AT_END_NO_TRIM=This setence will keep its 4 spaces at the end.\ "
)
pofile = self.prop2po(propsource)
pounit = self.singleelement(pofile)
assert pounit.source == "This setence will keep its 4 spaces at the end. "
propsource = r"SPACE_AT_END_NO_TRIM2=This setence will keep its 4 spaces at the end.\\ "
pofile = self.prop2po(propsource)
pounit = self.singleelement(pofile)
assert pounit.source == "This setence will keep its 4 spaces at the end.\\ "
def test_tab_at_start_of_value(self):
"""check that tabs in a property are ignored where appropriate"""
propsource = r"property = value"
pofile = self.prop2po(propsource)
pounit = self.singleelement(pofile)
assert pounit.getlocations()[0] == "property"
assert pounit.source == "value"
def test_unicode(self):
"""checks that unicode entries convert properly"""
unistring = r"Norsk bokm\u00E5l"
propsource = "nb = %s\n" % unistring
pofile = self.prop2po(propsource)
pounit = self.singleelement(pofile)
print(repr(pofile.units[0].target))
print(repr(pounit.source))
assert pounit.source == "Norsk bokm\u00E5l"
def test_multiline_escaping(self):
"""checks that multiline enties can be parsed"""
propsource = r"""5093=Unable to connect to your IMAP server. You may have exceeded the maximum number \
of connections to this server. If so, use the Advanced IMAP Server Settings dialog to \
reduce the number of cached connections."""
pofile = self.prop2po(propsource)
print(repr(pofile.units[1].target))
assert self.countelements(pofile) == 1
def test_comments(self):
"""test to ensure that we take comments from .properties and place them in .po"""
propsource = """# Comment
prefPanel-smime=Security"""
pofile = self.prop2po(propsource)
pounit = self.singleelement(pofile)
assert pounit.getnotes("developer") == "# Comment"
def test_multiline_comments(self):
"""test to ensure that we handle multiline comments well"""
propsource = """# Comment
# commenty 2
## @name GENERIC_ERROR
## @loc none
prefPanel-smime=
"""
pofile = self.prop2po(propsource)
print(bytes(pofile))
# header comments:
assert b"#. # Comment\n#. # commenty 2" in bytes(pofile)
pounit = self.singleelement(pofile)
assert pounit.getnotes("developer") == "## @name GENERIC_ERROR\n## @loc none"
def test_folding_accesskeys(self):
"""check that we can fold various accesskeys into their associated label (bug #115)"""
propsource = r"""cmd_addEngine.label = Add Engines...
cmd_addEngine.accesskey = A"""
pofile = self.prop2po(propsource, personality="mozilla")
pounit = self.singleelement(pofile)
assert pounit.source == "&Add Engines..."
def test_dont_translate(self):
"""check that we know how to ignore don't translate instructions in properties files (bug #116)"""
propsource = """# LOCALIZATION NOTE (dont): DONT_TRANSLATE.
dont=don't translate me
do=translate me
"""
pofile = self.prop2po(propsource)
assert self.countelements(pofile) == 1
def test_emptyproperty(self):
"""checks that empty property definitions survive into po file, bug 15"""
for delimiter in ["=", ""]:
propsource = "# comment\ncredit%s" % delimiter
pofile = self.prop2po(propsource)
pounit = self.singleelement(pofile)
assert pounit.getlocations() == ["credit"]
assert pounit.getcontext() == "credit"
assert 'msgctxt "credit"' in str(pounit)
assert b"#. # comment" in bytes(pofile)
assert pounit.source == ""
def test_emptyproperty_translated(self):
"""checks that if we translate an empty property it makes it into the PO"""
for delimiter in ["=", ""]:
proptemplate = "credit%s" % delimiter
propsource = "credit=Translators Names"
pofile = self.prop2po(propsource, proptemplate)
pounit = self.singleelement(pofile)
assert pounit.getlocations() == ["credit"]
# FIXME we don't seem to get a _: comment but we should
# assert pounit.getcontext() == "credit"
assert pounit.source == ""
assert pounit.target == "Translators Names"
def test_newlines_in_value(self):
"""check that we can carry newlines that appear in the property value into the PO"""
propsource = """prop=\\nvalue\\n\n"""
pofile = self.prop2po(propsource)
unit = self.singleelement(pofile)
assert unit.source == "\nvalue\n"
def test_header_comments(self):
"""check that we can handle comments not directly associated with a property"""
propsource = """# Header comment\n\n# Comment\n\nprop=value\n"""
pofile = self.prop2po(propsource)
unit = self.singleelement(pofile)
assert unit.source == "value"
assert unit.getnotes("developer") == "# Comment"
def test_unassociated_comment_order(self):
"""check that we can handle the order of unassociated comments"""
propsource = """# Header comment\n\n# 1st Unassociated comment\n\n# 2nd Connected comment\nprop=value\n"""
pofile = self.prop2po(propsource)
unit = self.singleelement(pofile)
assert unit.source == "value"
assert (
unit.getnotes("developer")
== "# 1st Unassociated comment\n\n# 2nd Connected comment"
)
def test_x_header(self):
"""Test that we correctly create the custom header entries
(accelerators, merge criterion).
"""
propsource = """prop=value\n"""
outputpo = self.prop2po(propsource, personality="mozilla")
assert b"X-Accelerator-Marker" in bytes(outputpo)
assert b"X-Merge-On" in bytes(outputpo)
        # Even though the gaia flavour inherits from mozilla, it should not
# get the header
outputpo = self.prop2po(propsource, personality="gaia")
assert b"X-Accelerator-Marker" not in bytes(outputpo)
assert b"X-Merge-On" not in bytes(outputpo)
def test_gaia_plurals(self):
"""Test conversion of gaia plural units."""
propsource = """
message-multiedit-header={[ plural(n) ]}
message-multiedit-header[zero]=Edit
message-multiedit-header[one]={{ n }} selected
message-multiedit-header[two]={{ n }} selected
message-multiedit-header[few]={{ n }} selected
message-multiedit-header[many]={{ n }} selected
message-multiedit-header[other]={{ n }} selected
"""
outputpo = self.prop2po(propsource, personality="gaia")
pounit = outputpo.units[-1]
assert pounit.hasplural()
assert pounit.getlocations() == ["message-multiedit-header"]
print(outputpo)
zero_unit = outputpo.units[-2]
assert not zero_unit.hasplural()
assert zero_unit.source == "Edit"
def test_successive_gaia_plurals(self):
"""Test conversion of two successive gaia plural units."""
propsource = """
message-multiedit-header={[ plural(n) ]}
message-multiedit-header[zero]=Edit
message-multiedit-header[one]={{ n }} selected
message-multiedit-header[two]={{ n }} selected
message-multiedit-header[few]={{ n }} selected
message-multiedit-header[many]={{ n }} selected
message-multiedit-header[other]={{ n }} selected
message-multiedit-header2={[ plural(n) ]}
message-multiedit-header2[zero]=Edit 2
message-multiedit-header2[one]={{ n }} selected 2
message-multiedit-header2[two]={{ n }} selected 2
message-multiedit-header2[few]={{ n }} selected 2
message-multiedit-header2[many]={{ n }} selected 2
message-multiedit-header2[other]={{ n }} selected 2
"""
outputpo = self.prop2po(propsource, personality="gaia")
pounit = outputpo.units[-1]
assert pounit.hasplural()
assert pounit.getlocations() == ["message-multiedit-header2"]
pounit = outputpo.units[-3]
assert pounit.hasplural()
assert pounit.getlocations() == ["message-multiedit-header"]
print(outputpo)
zero_unit = outputpo.units[-2]
assert not zero_unit.hasplural()
assert zero_unit.source == "Edit 2"
zero_unit = outputpo.units[-4]
assert not zero_unit.hasplural()
assert zero_unit.source == "Edit"
def test_duplicate_keys(self):
"""Check that we correctly handle duplicate keys."""
source = """
key=value
key=value
"""
po_file = self.prop2po(source)
assert self.countelements(po_file) == 1
po_unit = self.singleelement(po_file)
assert po_unit.source == "value"
source = """
key=value
key=another value
"""
po_file = self.prop2po(source)
assert self.countelements(po_file) == 2
po_unit = po_file.units[1]
assert po_unit.source == "value"
assert po_unit.getlocations() == ["key"]
po_unit = po_file.units[2]
assert po_unit.source == "another value"
assert po_unit.getlocations() == ["key"]
source = """
key1=value
key2=value
"""
po_file = self.prop2po(source)
assert self.countelements(po_file) == 2
po_unit = po_file.units[1]
assert po_unit.source == "value"
assert po_unit.getlocations() == ["key1"]
po_unit = po_file.units[2]
assert po_unit.source == "value"
assert po_unit.getlocations() == ["key2"]
def test_gwt_plurals(self):
"""Test conversion of gwt plural units."""
propsource = """
message-multiedit-header={0,number} selected
message-multiedit-header[none]=Edit
message-multiedit-header[one]={0,number} selected
message-multiedit-header[two]={0,number} selected
message-multiedit-header[few]={0,number} selected
message-multiedit-header[many]={0,number} selected
"""
outputpo = self.prop2po(propsource, personality="gwt")
pounit = outputpo.units[-1]
assert pounit.getlocations() == ["message-multiedit-header"]
class TestProp2POCommand(test_convert.TestConvertCommand, TestProp2PO):
"""Tests running actual prop2po commands on files"""
convertmodule = prop2po
defaultoptions = {"progress": "none"}
def test_help(self, capsys):
"""tests getting help"""
options = super().test_help(capsys)
| options = self.help_check(options, "-P, --pot") | 1,336 | lcc_e | python | null | b2821f4a0fef62fd70ef5f4e22b1d56b272a3581d330dac8 |
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_wireless_controller_inter_controller
short_description: Configure inter wireless controller operation in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify wireless_controller feature and inter_controller category.
      Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
wireless_controller_inter_controller:
description:
- Configure inter wireless controller operation.
default: null
type: dict
suboptions:
fast_failover_max:
description:
- Maximum number of retransmissions for fast failover HA messages between peer wireless controllers (3 - 64).
type: int
fast_failover_wait:
description:
- Minimum wait time before an AP transitions from secondary controller to primary controller (10 - 86400 sec).
type: int
inter_controller_key:
description:
- Secret key for inter-controller communications.
type: str
inter_controller_mode:
description:
- Configure inter-controller mode (disable, l2-roaming, 1+1).
type: str
choices:
- disable
- l2-roaming
- 1+1
inter_controller_peer:
description:
- Fast failover peer wireless controller list.
type: list
suboptions:
id:
description:
- ID.
required: true
type: int
peer_ip:
description:
- Peer wireless controller's IP address.
type: str
peer_port:
description:
- Port used by the wireless controller's for inter-controller communications (1024 - 49150).
type: int
peer_priority:
description:
- Peer wireless controller's priority (primary or secondary).
type: str
choices:
- primary
- secondary
inter_controller_pri:
description:
- Configure inter-controller's priority (primary or secondary).
type: str
choices:
- primary
- secondary
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure inter wireless controller operation.
fortios_wireless_controller_inter_controller:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
wireless_controller_inter_controller:
fast_failover_max: "3"
fast_failover_wait: "4"
inter_controller_key: "<your_own_value>"
inter_controller_mode: "disable"
inter_controller_peer:
-
id: "8"
peer_ip: "<your_own_value>"
peer_port: "10"
peer_priority: "primary"
inter_controller_pri: "primary"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_wireless_controller_inter_controller_data(json):
option_list = ['fast_failover_max', 'fast_failover_wait', 'inter_controller_key',
'inter_controller_mode', 'inter_controller_peer', 'inter_controller_pri']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
    if isinstance(data, list):
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
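# The FortiOS REST API expects hyphenated attribute names (e.g. "fast-failover-max"),
# while the Ansible argument spec uses underscores, hence the recursive key
# conversion above before the payload is sent to the device.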
def wireless_controller_inter_controller(data, fos):
vdom = data['vdom']
wireless_controller_inter_controller_data = data['wireless_controller_inter_controller']
filtered_data = underscore_to_hyphen(filter_wireless_controller_inter_controller_data(wireless_controller_inter_controller_data))
return fos.set('wireless-controller',
'inter-controller',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_wireless_controller(data, fos):
if data['wireless_controller_inter_controller']:
resp = wireless_controller_inter_controller(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"wireless_controller_inter_controller": {
"required": False, "type": "dict", "default": None,
"options": {
"fast_failover_max": {"required": False, "type": "int"},
"fast_failover_wait": {"required": False, "type": "int"},
"inter_controller_key": {"required": False, "type": "str", "no_log": True},
"inter_controller_mode": {"required": False, "type": "str",
"choices": ["disable", "l2-roaming", "1+1"]},
"inter_controller_peer": {"required": False, "type": "list",
"options": {
"id": {"required": True, "type": "int"},
"peer_ip": {"required": False, "type": "str"},
"peer_port": {"required": False, "type": "int"},
"peer_priority": {"required": False, "type": "str",
"choices": ["primary", "secondary"]}
}},
"inter_controller_pri": {"required": False, "type": "str",
"choices": ["primary", "secondary"]}
}
}
}
| module = AnsibleModule(argument_spec=fields, | 1,009 | lcc_e | python | null | e199272f351e3f6118b8425dc96ee820466bf95092f4e4a1 |
|
#!/usr/bin/env python
import socket
import struct
import threading
import time
import dns
from dnsdisttests import DNSDistTest
try:
range = xrange
except NameError:
pass
class TestTCPShort(DNSDistTest):
# this test suite uses a different responder port
# because, contrary to the other ones, its
# responders allow trailing data and multiple responses,
# and we don't want to mix things up.
_testServerPort = 5361
_serverKey = 'server.key'
_serverCert = 'server.chain'
_serverName = 'tls.tests.dnsdist.org'
_caCert = 'ca.pem'
_tlsServerPort = 8453
_tcpSendTimeout = 60
_config_template = """
newServer{address="127.0.0.1:%s"}
addTLSLocal("127.0.0.1:%s", "%s", "%s")
setTCPSendTimeout(%d)
"""
_config_params = ['_testServerPort', '_tlsServerPort', '_serverCert', '_serverKey', '_tcpSendTimeout']
@classmethod
def startResponders(cls):
print("Launching responders..")
cls._UDPResponder = threading.Thread(name='UDP Responder', target=cls.UDPResponder, args=[cls._testServerPort, cls._toResponderQueue, cls._fromResponderQueue, True])
cls._UDPResponder.setDaemon(True)
cls._UDPResponder.start()
cls._TCPResponder = threading.Thread(name='TCP Responder', target=cls.TCPResponder, args=[cls._testServerPort, cls._toResponderQueue, cls._fromResponderQueue, True, True])
cls._TCPResponder.setDaemon(True)
cls._TCPResponder.start()
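    # The trailing boolean arguments passed above presumably enable the
    # behaviour described in the class comment: these responders accept
    # trailing data and can send multiple responses over a single connection,
    # unlike the shared responders used by the other test suites.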
def testTCPShortRead(self):
"""
TCP: Short read from client
"""
name = 'short-read.tcp-short.tests.powerdns.com.'
query = dns.message.make_query(name, 'A', 'IN')
expectedResponse = dns.message.make_response(query)
rrset = dns.rrset.from_text(name,
3600,
dns.rdataclass.IN,
dns.rdatatype.A,
'192.0.2.1')
expectedResponse.answer.append(rrset)
conn = self.openTCPConnection()
wire = query.to_wire()
# announce 7680 bytes (more than 4096, less than 8192 - the 512 bytes dnsdist is going to add)
announcedSize = 7680
paddingSize = announcedSize - len(wire)
wire = wire + (b'A' * (paddingSize - 1))
self._toResponderQueue.put(expectedResponse, True, 2.0)
sizeBytes = struct.pack("!H", announcedSize)
conn.send(sizeBytes[:1])
time.sleep(1)
conn.send(sizeBytes[1:])
# send announcedSize bytes minus 1 so we get a second read
conn.send(wire)
time.sleep(1)
# send the remaining byte
conn.send(b'A')
(receivedQuery, receivedResponse) = self.recvTCPResponseOverConnection(conn, True)
conn.close()
self.assertTrue(receivedQuery)
self.assertTrue(receivedResponse)
receivedQuery.id = query.id
self.assertEqual(query, receivedQuery)
self.assertEqual(receivedResponse, expectedResponse)
def testTCPTLSShortRead(self):
"""
TCP/TLS: Short read from client
"""
name = 'short-read-tls.tcp-short.tests.powerdns.com.'
query = dns.message.make_query(name, 'A', 'IN')
expectedResponse = dns.message.make_response(query)
rrset = dns.rrset.from_text(name,
3600,
dns.rdataclass.IN,
dns.rdatatype.A,
'192.0.2.1')
expectedResponse.answer.append(rrset)
conn = self.openTLSConnection(self._tlsServerPort, self._serverName, self._caCert)
wire = query.to_wire()
# announce 7680 bytes (more than 4096, less than 8192 - the 512 bytes dnsdist is going to add)
announcedSize = 7680
paddingSize = announcedSize - len(wire)
wire = wire + (b'A' * (paddingSize - 1))
self._toResponderQueue.put(expectedResponse, True, 2.0)
sizeBytes = struct.pack("!H", announcedSize)
conn.send(sizeBytes[:1])
time.sleep(1)
conn.send(sizeBytes[1:])
# send announcedSize bytes minus 1 so we get a second read
conn.send(wire)
time.sleep(1)
# send the remaining byte
conn.send(b'A')
(receivedQuery, receivedResponse) = self.recvTCPResponseOverConnection(conn, True)
conn.close()
self.assertTrue(receivedQuery)
self.assertTrue(receivedResponse)
receivedQuery.id = query.id
self.assertEqual(query, receivedQuery)
self.assertEqual(receivedResponse, expectedResponse)
def testTCPShortWrite(self):
"""
TCP: Short write to client
"""
name = 'short-write.tcp-short.tests.powerdns.com.'
query = dns.message.make_query(name, 'AXFR', 'IN')
# we prepare a large AXFR answer
# SOA + 200 dns messages of one huge TXT RRset each + SOA
responses = []
soa = dns.rrset.from_text(name,
60,
dns.rdataclass.IN,
dns.rdatatype.SOA,
'ns.' + name + ' hostmaster.' + name + ' 1 3600 3600 3600 60')
soaResponse = dns.message.make_response(query)
soaResponse.use_edns(edns=False)
soaResponse.answer.append(soa)
responses.append(soaResponse)
response = dns.message.make_response(query)
response.use_edns(edns=False)
content = ""
for i in range(200):
if len(content) > 0:
content = content + ', '
content = content + (str(i)*50)
rrset = dns.rrset.from_text(name,
3600,
dns.rdataclass.IN,
dns.rdatatype.TXT,
content)
response.answer.append(rrset)
for _ in range(200):
responses.append(response)
responses.append(soaResponse)
conn = self.openTCPConnection()
for response in responses:
self._toResponderQueue.put(response, True, 2.0)
self.sendTCPQueryOverConnection(conn, query)
# we sleep for one second, making sure that dnsdist
# will fill its TCP window and buffers, which will result
# in some short writes
time.sleep(1)
# we then read the messages
receivedResponses = []
while True:
datalen = conn.recv(2)
if not datalen:
break
(datalen,) = struct.unpack("!H", datalen)
data = b''
remaining = datalen
got = conn.recv(remaining)
while got:
data = data + got
if len(data) == datalen:
break
remaining = remaining - len(got)
if remaining <= 0:
break
got = conn.recv(remaining)
if data and len(data) == datalen:
receivedResponse = dns.message.from_wire(data)
receivedResponses.append(receivedResponse)
receivedQuery = None
if not self._fromResponderQueue.empty():
receivedQuery = self._fromResponderQueue.get(True, 2.0)
conn.close()
# and check that everything is good
self.assertTrue(receivedQuery)
receivedQuery.id = query.id
self.assertEqual(query, receivedQuery)
self.assertEqual(receivedResponses, responses)
def testTCPTLSShortWrite(self):
"""
TCP/TLS: Short write to client
"""
# same as testTCPShortWrite but over TLS this time
name = 'short-write-tls.tcp-short.tests.powerdns.com.'
query = dns.message.make_query(name, 'AXFR', 'IN')
responses = []
soa = dns.rrset.from_text(name,
60,
dns.rdataclass.IN,
dns.rdatatype.SOA,
'ns.' + name + ' hostmaster.' + name + ' 1 3600 3600 3600 60')
| soaResponse = dns.message.make_response(query) | 648 | lcc_e | python | null | aab6e3b1dcab2261ae6d783a39b356522fa6e32c6b4b0579 |
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import xml.sax
import urllib, base64
import time
import boto.utils
import types
from boto.connection import AWSAuthConnection
from boto import handler
from boto.s3.bucket import Bucket
from boto.s3.key import Key
from boto.resultset import ResultSet
from boto.exception import S3ResponseError, S3CreateError, BotoClientError
def assert_case_insensitive(f):
def wrapper(*args, **kwargs):
if len(args) == 3 and not (args[2].islower() or args[2].isalnum()):
raise BotoClientError("Bucket names cannot contain upper-case " \
"characters when using either the sub-domain or virtual " \
"hosting calling format.")
return f(*args, **kwargs)
return wrapper
class _CallingFormat:
def build_url_base(self, protocol, server, bucket, key=''):
url_base = '%s://' % protocol
url_base += self.build_host(server, bucket)
url_base += self.build_path_base(bucket, key)
return url_base
def build_host(self, server, bucket):
if bucket == '':
return server
else:
return self.get_bucket_server(server, bucket)
def build_auth_path(self, bucket, key=''):
path = ''
if bucket != '':
path = '/' + bucket
return path + '/%s' % urllib.quote(key)
def build_path_base(self, bucket, key=''):
return '/%s' % urllib.quote(key)
class SubdomainCallingFormat(_CallingFormat):
@assert_case_insensitive
def get_bucket_server(self, server, bucket):
return '%s.%s' % (bucket, server)
class VHostCallingFormat(_CallingFormat):
@assert_case_insensitive
def get_bucket_server(self, server, bucket):
return bucket
class OrdinaryCallingFormat(_CallingFormat):
def get_bucket_server(self, server, bucket):
return server
def build_path_base(self, bucket, key=''):
path_base = '/'
if bucket:
path_base += "%s/" % bucket
return path_base + urllib.quote(key)
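# For a bucket named "mybucket" and key "some/key" the calling formats above
# build requests roughly as follows (illustrative, assuming the default
# s3.amazonaws.com endpoint):
#   SubdomainCallingFormat: host "mybucket.s3.amazonaws.com", path "/some/key"
#   VHostCallingFormat:     host "mybucket" (a CNAME pointing at S3), path "/some/key"
#   OrdinaryCallingFormat:  host "s3.amazonaws.com", path "/mybucket/some/key"
# The sub-domain and virtual-host formats only accept lower-case bucket names,
# which is what assert_case_insensitive enforces.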
class Location:
DEFAULT = ''
EU = 'EU'
class S3Connection(AWSAuthConnection):
DefaultHost = 's3.amazonaws.com'
QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None,
host=DefaultHost, debug=0, https_connection_factory=None,
calling_format=SubdomainCallingFormat(), path='/'):
self.calling_format = calling_format
AWSAuthConnection.__init__(self, host,
aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
debug=debug, https_connection_factory=https_connection_factory,
path=path)
def __iter__(self):
return self.get_all_buckets()
def __contains__(self, bucket_name):
return not (self.lookup(bucket_name) is None)
def build_post_policy(self, expiration_time, conditions):
"""
Taken from the AWS book Python examples and modified for use with boto
"""
if type(expiration_time) != time.struct_time:
            raise TypeError('Policy document must include a valid expiration Time object')
# Convert conditions object mappings to condition statements
return '{"expiration": "%s",\n"conditions": [%s]}' % \
(time.strftime(boto.utils.ISO8601, expiration_time), ",".join(conditions))
def build_post_form_args(self, bucket_name, key, expires_in = 6000,
acl = None, success_action_redirect = None, max_content_length = None,
http_method = "http"):
"""
Taken from the AWS book Python examples and modified for use with boto
This only returns the arguments required for the post form, not the actual form
This does not return the file input field which also needs to be added
@param bucket_name: Bucket to submit to
@param key: Key name, optionally add ${filename} to the end to attach the submitted filename
@param expires_in: Time (in seconds) before this expires, defaults to 6000
@param acl: ACL rule to use, if any
@param success_action_redirect: URL to redirect to on success
@param max_content_length: Maximum size for this file
@param http_method: HTTP Method to use, "http" or "https"
@return: {"action": action_url_to_post_to, "fields": [ {"name": field_name, "value": field_value}, {"name": field_name2, "value": field_value2} ] }
@rtype: dict
"""
fields = []
conditions = []
expiration = time.gmtime(int(time.time() + expires_in))
# Generate policy document
conditions.append('{"bucket": "%s"}' % bucket_name)
if key.endswith("${filename}"):
conditions.append('["starts-with", "$key", "%s"]' % key[:-len("${filename}")])
else:
conditions.append('{"key": "%s"}' % key)
if acl:
conditions.append('{"acl": "%s"}' % acl)
fields.append({ "name": "acl", "value": acl})
if success_action_redirect:
conditions.append('{"success_action_redirect": "%s"}' % success_action_redirect)
fields.append({ "name": "success_action_redirect", "value": success_action_redirect})
if max_content_length:
conditions.append('["content-length-range", 0, %i]' % max_content_length)
fields.append({"name":'content-length-range', "value": "0,%i" % max_content_length})
policy = self.build_post_policy(expiration, conditions)
# Add the base64-encoded policy document as the 'policy' field
policy_b64 = base64.b64encode(policy)
fields.append({"name": "policy", "value": policy_b64})
# Add the AWS access key as the 'AWSAccessKeyId' field
fields.append({"name": "AWSAccessKeyId", "value": self.aws_access_key_id})
# Add signature for encoded policy document as the 'AWSAccessKeyId' field
hmac_copy = self.hmac.copy()
hmac_copy.update(policy_b64)
signature = base64.encodestring(hmac_copy.digest()).strip()
fields.append({"name": "signature", "value": signature})
fields.append({"name": "key", "value": key})
# HTTPS protocol will be used if the secure HTTP option is enabled.
url = '%s://%s.s3.amazonaws.com/' % (http_method, bucket_name)
return {"action": url, "fields": fields}
def generate_url(self, expires_in, method, bucket='', key='',
headers=None, query_auth=True, force_http=False):
if not headers:
headers = {}
expires = int(time.time() + expires_in)
auth_path = self.calling_format.build_auth_path(bucket, key)
canonical_str = boto.utils.canonical_string(method, auth_path,
headers, expires)
hmac_copy = self.hmac.copy()
hmac_copy.update(canonical_str)
b64_hmac = base64.encodestring(hmac_copy.digest()).strip()
encoded_canonical = urllib.quote_plus(b64_hmac)
path = self.calling_format.build_path_base(bucket, key)
if query_auth:
query_part = '?' + self.QueryString % (encoded_canonical, expires,
self.aws_access_key_id)
else:
query_part = ''
if force_http:
protocol = 'http'
else:
protocol = self.protocol
return self.calling_format.build_url_base(protocol, self.server_name(),
bucket, key) + query_part
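    # Illustrative use of generate_url() with a hypothetical bucket and key,
    # assuming the default secure connection and sub-domain calling format:
    #
    #     url = conn.generate_url(3600, 'GET', bucket='mybucket', key='some/key')
    #     # -> https://mybucket.s3.amazonaws.com/some/key?Signature=...&Expires=...&AWSAccessKeyId=...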
def get_all_buckets(self):
response = self.make_request('GET')
body = response.read()
if response.status > 300:
raise S3ResponseError(response.status, response.reason, body)
rs = ResultSet([('Bucket', Bucket)])
h = handler.XmlHandler(rs, self)
xml.sax.parseString(body, h)
return rs
def get_canonical_user_id(self):
"""
        Convenience method that returns the "CanonicalUserID" of the user whose credentials
are associated with the connection. The only way to get this value is to do a GET
request on the service which returns all buckets associated with the account. As part
of that response, the canonical userid is returned. This method simply does all of
that and then returns just the user id.
@rtype: string
@return: A string containing the canonical user id.
"""
rs = self.get_all_buckets()
return rs.ID
def get_bucket(self, bucket_name, validate=True):
bucket = Bucket(self, bucket_name)
if validate:
rs = bucket.get_all_keys(None, maxkeys=0)
return bucket
def lookup(self, bucket_name, validate=True):
try:
bucket = self.get_bucket(bucket_name, validate)
except:
bucket = None
return bucket
def create_bucket(self, bucket_name, headers=None, location=Location.DEFAULT, policy=None):
"""
Creates a new bucket in the requested location. By default it is created in the US.
You can pass Location.EU to create a European bucket.
@type bucket_name: string
@param bucket_name: The name of the new bucket
@type headers: dict
@param headers: Additional headers to pass along with the request to AWS.
@type location: L{Location<boto.s3.connection.Location>}
@param location: The location of the new bucket
@type policy: L{CannedACLString<boto.s3.acl.CannedACLStrings>}
@param policy: A canned ACL policy that will be applied to the new bucket in S3.
"""
if policy:
if headers:
headers['x-amz-acl'] = policy
else:
headers = {'x-amz-acl' : policy}
if location == Location.DEFAULT:
data = ''
else:
data = '<CreateBucketConstraint><LocationConstraint>' + \
location + '</LocationConstraint></CreateBucketConstraint>'
response = self.make_request('PUT', bucket_name, headers=headers,
data=data)
body = response.read()
if response.status == 409:
raise S3CreateError(response.status, response.reason, body)
if response.status == 200:
return Bucket(self, bucket_name)
else:
raise S3ResponseError(response.status, response.reason, body)
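# A hypothetical usage sketch ('conn' is an assumed S3Connection instance):
#
#   bucket = conn.create_bucket('my-bucket', location=Location.EU,
#                               policy='public-read')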
def delete_bucket(self, bucket):
response = self.make_request('DELETE', bucket)
body = response.read()
if response.status != 204:
raise S3ResponseError(response.status, response.reason, body)
def make_request(self, method, bucket='', key='', headers=None, data='',
query_args=None, sender=None):
if isinstance(bucket, Bucket):
bucket = bucket.name
if isinstance(key, Key):
key = key.name
path = self.calling_format.build_path_base(bucket, key)
auth_path = self.calling_format.build_auth_path(bucket, key)
host = self.calling_format.build_host(self.server_name(), bucket)
if query_args:
path += '?' + query_args

"""Container and Namespace classes"""
import anydbm
import cPickle
import logging
import os
import time
import beaker.util as util
from beaker.exceptions import CreationAbortedError, MissingCacheParameter
from beaker.synchronization import _threading, file_synchronizer, \
mutex_synchronizer, NameLock, null_synchronizer
__all__ = ['Value', 'Container', 'ContainerContext',
'MemoryContainer', 'DBMContainer', 'NamespaceManager',
'MemoryNamespaceManager', 'DBMNamespaceManager', 'FileContainer',
'OpenResourceNamespaceManager',
'FileNamespaceManager', 'CreationAbortedError']
logger = logging.getLogger('beaker.container')
if logger.isEnabledFor(logging.DEBUG):
debug = logger.debug
else:
def debug(message, *args):
pass
class NamespaceManager(object):
"""Handles dictionary operations and locking for a namespace of
values.
The implementation for setting and retrieving the namespace data is
handled by subclasses.
NamespaceManager may be used alone, or may be privately accessed by
one or more Container objects. Container objects provide per-key
services like expiration times and automatic recreation of values.
Multiple NamespaceManagers created with a particular name will all
share access to the same underlying datasource and will attempt to
synchronize against a common mutex object. The scope of this
sharing may be within a single process or across multiple
processes, depending on the type of NamespaceManager used.
The NamespaceManager itself is generally threadsafe, except in the
case of the DBMNamespaceManager in conjunction with the gdbm dbm
implementation.
"""
@classmethod
def _init_dependencies(cls):
pass
def __init__(self, namespace):
self._init_dependencies()
self.namespace = namespace
def get_creation_lock(self, key):
raise NotImplementedError()
def do_remove(self):
raise NotImplementedError()
def acquire_read_lock(self):
pass
def release_read_lock(self):
pass
def acquire_write_lock(self, wait=True):
return True
def release_write_lock(self):
pass
def has_key(self, key):
return self.__contains__(key)
def __getitem__(self, key):
raise NotImplementedError()
def __setitem__(self, key, value):
raise NotImplementedError()
def set_value(self, key, value, expiretime=None):
"""Optional set_value() method called by Value.
Allows an expiretime to be passed, for namespace
implementations which can prune their collections
using expiretime.
"""
self[key] = value
def __contains__(self, key):
raise NotImplementedError()
def __delitem__(self, key):
raise NotImplementedError()
def keys(self):
raise NotImplementedError()
def remove(self):
self.do_remove()
class OpenResourceNamespaceManager(NamespaceManager):
"""A NamespaceManager where read/write operations require opening/
closing of a resource which is possibly mutexed.
"""
def __init__(self, namespace):
NamespaceManager.__init__(self, namespace)
self.access_lock = self.get_access_lock()
self.openers = 0
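# 'openers' counts nested open() calls so the underlying resource is opened
# on the first acquisition and closed only when the last reader/writer
# releases it (see open()/close() below).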
self.mutex = _threading.Lock()
def get_access_lock(self):
raise NotImplementedError()
def do_open(self, flags):
raise NotImplementedError()
def do_close(self):
raise NotImplementedError()
def acquire_read_lock(self):
self.access_lock.acquire_read_lock()
try:
self.open('r', checkcount = True)
except:
self.access_lock.release_read_lock()
raise
def release_read_lock(self):
try:
self.close(checkcount = True)
finally:
self.access_lock.release_read_lock()
def acquire_write_lock(self, wait=True):
r = self.access_lock.acquire_write_lock(wait)
try:
if (wait or r):
self.open('c', checkcount = True)
return r
except:
self.access_lock.release_write_lock()
raise
def release_write_lock(self):
try:
self.close(checkcount=True)
finally:
self.access_lock.release_write_lock()
def open(self, flags, checkcount=False):
self.mutex.acquire()
try:
if checkcount:
if self.openers == 0:
self.do_open(flags)
self.openers += 1
else:
self.do_open(flags)
self.openers = 1
finally:
self.mutex.release()
def close(self, checkcount=False):
self.mutex.acquire()
try:
if checkcount:
self.openers -= 1
if self.openers == 0:
self.do_close()
else:
if self.openers > 0:
self.do_close()
self.openers = 0
finally:
self.mutex.release()
def remove(self):
self.access_lock.acquire_write_lock()
try:
self.close(checkcount=False)
self.do_remove()
finally:
self.access_lock.release_write_lock()
class Value(object):
__slots__ = 'key', 'createfunc', 'expiretime', 'expire_argument', 'starttime', 'storedtime',\
'namespace'
def __init__(self, key, namespace, createfunc=None, expiretime=None, starttime=None):
self.key = key
self.createfunc = createfunc
self.expire_argument = expiretime
self.starttime = starttime
self.storedtime = -1
self.namespace = namespace
def has_value(self):
"""return true if the container has a value stored.
This is regardless of it being expired or not.
"""
self.namespace.acquire_read_lock()
try:
return self.namespace.has_key(self.key)
finally:
self.namespace.release_read_lock()
def can_have_value(self):
return self.has_current_value() or self.createfunc is not None
def has_current_value(self):
self.namespace.acquire_read_lock()
try:
has_value = self.namespace.has_key(self.key)
if has_value:
try:
stored, expired, value = self._get_value()
return not self._is_expired(stored, expired)
except KeyError:
pass
return False
finally:
self.namespace.release_read_lock()
def _is_expired(self, storedtime, expiretime):
"""Return true if this container's value is expired."""
return (
(
self.starttime is not None and
storedtime < self.starttime
)
or
(
expiretime is not None and
time.time() >= expiretime + storedtime
)
)
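# get_value() first tries to return a fresh stored value under the read lock.
# If the value is missing or expired it takes a per-key creation lock; while
# one thread recreates the value, other threads that still see an (expired)
# stored value return it immediately instead of blocking, which avoids a
# thundering herd on createfunc.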
def get_value(self):
self.namespace.acquire_read_lock()
try:
has_value = self.has_value()
if has_value:
try:
stored, expired, value = self._get_value()
if not self._is_expired(stored, expired):
return value
except KeyError:
# guard against un-mutexed backends raising KeyError
has_value = False
if not self.createfunc:
raise KeyError(self.key)
finally:
self.namespace.release_read_lock()
has_createlock = False
creation_lock = self.namespace.get_creation_lock(self.key)
if has_value:
if not creation_lock.acquire(wait=False):
debug("get_value returning old value while new one is created")
return value
else:
debug("lock_creatfunc (didnt wait)")
has_createlock = True
if not has_createlock:
debug("lock_createfunc (waiting)")
creation_lock.acquire()
debug("lock_createfunc (waited)")
try:
# see if someone created the value already
self.namespace.acquire_read_lock()
try:
if self.has_value():
try:
stored, expired, value = self._get_value()
if not self._is_expired(stored, expired):
return value
except KeyError:
# guard against un-mutexed backends raising KeyError
pass
finally:
self.namespace.release_read_lock()
debug("get_value creating new value")
v = self.createfunc()
self.set_value(v)
return v
finally:
creation_lock.release()
debug("released create lock")
def _get_value(self):
value = self.namespace[self.key]
try:
stored, expired, value = value
except ValueError:
if not len(value) == 2:
raise
# Old format: upgrade
stored, value = value
expired = self.expire_argument
debug("get_value upgrading time %r expire time %r", stored, self.expire_argument)
self.namespace.release_read_lock()
self.set_value(value, stored)
self.namespace.acquire_read_lock()
except TypeError:
# occurs when the value is None. memcached
# may yank the rug from under us in which case
# that's the result
raise KeyError(self.key)
return stored, expired, value
def set_value(self, value, storedtime=None):
self.namespace.acquire_write_lock()
try:
if storedtime is None:
storedtime = time.time()
debug("set_value stored time %r expire time %r", storedtime, self.expire_argument)
self.namespace.set_value(self.key, (storedtime, self.expire_argument, value))
finally:
self.namespace.release_write_lock()
def clear_value(self):
self.namespace.acquire_write_lock()
try:
debug("clear_value")
if self.namespace.has_key(self.key):
try:
del self.namespace[self.key]
except KeyError:
# guard against un-mutexed backends raising KeyError
pass
self.storedtime = -1
finally:
self.namespace.release_write_lock()
class AbstractDictionaryNSManager(NamespaceManager):
"""A subclassable NamespaceManager that places data in a dictionary.
Subclasses should provide a "dictionary" attribute or descriptor
which returns a dict-like object. The dictionary will store keys
that are local to the "namespace" attribute of this manager, so
ensure that the dictionary will not be used by any other namespace.
e.g.::
import collections
cached_data = collections.defaultdict(dict)
class MyDictionaryManager(AbstractDictionaryNSManager):
def __init__(self, namespace):
AbstractDictionaryNSManager.__init__(self, namespace)
self.dictionary = cached_data[self.namespace]
The above stores data in a global dictionary called "cached_data",
which is structured as a dictionary of dictionaries, keyed
first on namespace name to a sub-dictionary, then on actual
cache key to value.
"""
def get_creation_lock(self, key):
return NameLock(
identifier="memorynamespace/funclock/%s/%s" % (self.namespace, key),
reentrant=True
)
def __getitem__(self, key):
return self.dictionary[key]
def __contains__(self, key):
return self.dictionary.__contains__(key)
def has_key(self, key):
return self.dictionary.__contains__(key)
def __setitem__(self, key, value):
self.dictionary[key] = value
def __delitem__(self, key):
del self.dictionary[key]
def do_remove(self):
self.dictionary.clear()
def keys(self):
return self.dictionary.keys()
class MemoryNamespaceManager(AbstractDictionaryNSManager):
namespaces = util.SyncDict()
def __init__(self, namespace, **kwargs):
AbstractDictionaryNSManager.__init__(self, namespace)
self.dictionary = MemoryNamespaceManager.namespaces.get(self.namespace,
dict)
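# 'namespaces' is a process-wide SyncDict; assuming beaker's SyncDict.get
# semantics, 'dict' acts as a factory so each namespace lazily gets its own
# plain dictionary, shared by every MemoryNamespaceManager created with the
# same namespace name.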
class DBMNamespaceManager(OpenResourceNamespaceManager):
def __init__(self, namespace, dbmmodule=None, data_dir=None,
dbm_dir=None, lock_dir=None, digest_filenames=True, **kwargs):
self.digest_filenames = digest_filenames
if not dbm_dir and not data_dir:
raise MissingCacheParameter("data_dir or dbm_dir is required")
elif dbm_dir:
self.dbm_dir = dbm_dir
else:
self.dbm_dir = data_dir + "/container_dbm"
util.verify_directory(self.dbm_dir)
if not lock_dir and not data_dir:
raise MissingCacheParameter("data_dir or lock_dir is required")
elif lock_dir:
self.lock_dir = lock_dir
else:
self.lock_dir = data_dir + "/container_dbm_lock"
util.verify_directory(self.lock_dir)
self.dbmmodule = dbmmodule or anydbm
self.dbm = None
OpenResourceNamespaceManager.__init__(self, namespace)
self.file = util.encoded_path(root= self.dbm_dir,
identifiers=[self.namespace],
extension='.dbm',
digest_filenames=self.digest_filenames)
debug("data file %s", self.file)
self._checkfile()
def get_access_lock(self):
return file_synchronizer(identifier=self.namespace,
lock_dir=self.lock_dir)
def get_creation_lock(self, key):
return file_synchronizer(
identifier = "dbmcontainer/funclock/%s" % self.namespace,
lock_dir=self.lock_dir
)
def file_exists(self, file):
if os.access(file, os.F_OK):
return True
else:
for ext in ('db', 'dat', 'pag', 'dir'):
if os.access(file + os.extsep + ext, os.F_OK):
return True
return False
def _checkfile(self):
if not self.file_exists(self.file):
g = self.dbmmodule.open(self.file, 'c')
g.close()
def get_filenames(self):
list = []
if os.access(self.file, os.F_OK):
list.append(self.file)
for ext in ('pag', 'dir', 'db', 'dat'):
if os.access(self.file + os.extsep + ext, os.F_OK):

# -*- coding: utf-8 -*-
from ast import literal_eval
from pprint import pformat
import os
import shutil
from errbot import BotPlugin, botcmd
from errbot.plugin_manager import PluginConfigurationException, PluginActivationException
from errbot.repo_manager import RepoException
class Plugins(BotPlugin):
@botcmd(admin_only=True)
def repos_install(self, _, args):
""" install a plugin repository from the given source or a known public repo (see !repos to find those).
for example from a known repo: !repos install err-codebot
for example a git URL: git@github.com:gbin/plugin.git
or a URL to a tar.gz archive: http://www.gootz.net/plugin-latest.tar.gz
"""
args = args.strip()
if not args:
yield "Please specify a repository listed in '!repos' or " \
"give me the URL to a git repository that I should clone for you."
return
try:
yield "Installing %s..." % args
local_path = self._bot.repo_manager.install_repo(args)
errors = self._bot.plugin_manager.update_dynamic_plugins()
if errors:
yield 'Some plugins are generating errors:\n' + '\n'.join(errors.values())
# if the plugin failed to load, cleanly uninstall the repo
for path in errors.keys():
if path.startswith(local_path):
yield 'Removing %s as it did not load correctly.' % local_path
shutil.rmtree(local_path)
else:
yield ("A new plugin repository has been installed correctly from "
"%s. Refreshing the plugins commands..." % args)
loading_errors = self._bot.plugin_manager.activate_non_started_plugins()
if loading_errors:
yield loading_errors
yield "Plugins reloaded."
except RepoException as re:
yield "Error installing the repo: %s" % re
@botcmd(admin_only=True)
def repos_uninstall(self, _, repo_name):
""" uninstall a plugin repository by name.
"""
if not repo_name.strip():
yield "You should have a repo name as argument"
return
repos = self._bot.repo_manager.get_installed_plugin_repos()
if repo_name not in repos:
yield "This repo is not installed check with " + self._bot.prefix + "repos the list of installed ones"
return
plugin_path = os.path.join(self._bot.repo_manager.plugin_dir, repo_name)
self._bot.plugin_manager.remove_plugins_from_path(plugin_path)
self._bot.repo_manager.uninstall_repo(repo_name)
yield 'Repo %s removed.' % repo_name
@botcmd(template='repos')
def repos(self, _, args):
""" list the current active plugin repositories
"""
installed_repos = self._bot.repo_manager.get_installed_plugin_repos()
all_names = [name for name in installed_repos]
repos = {'repos': []}
for repo_name in all_names:
installed = False
if repo_name in installed_repos:
installed = True
from_index = self._bot.repo_manager.get_repo_from_index(repo_name)
if from_index is not None:
description = '\n'.join(('%s: %s' % (plug.name, plug.documentation) for plug in from_index))
else:
description = 'No description.'
# installed, public, name, desc
repos['repos'].append((installed, from_index is not None, repo_name, description))
return repos
@botcmd(template='repos2')
def repos_search(self, _, args):
""" Searches the repo index.
for example: !repos search jenkins
"""
if not args:
# TODO(gbin): return all the repos.
return {'error': "Please specify a keyword."}
return {'repos': self._bot.repo_manager.search_repos(args)}
@botcmd(split_args_with=' ', admin_only=True)
def repos_update(self, _, args):
""" update the bot and/or plugins
use : !repos update all
to update everything
or : !repos update repo_name repo_name ...
to update selectively some repos
"""
if 'all' in args:
results = self._bot.repo_manager.update_all_repos()
else:
results = self._bot.repo_manager.update_repos(args)
yield "Start updating ... "
for d, success, feedback in results:
if success:
yield "Update of %s succeeded...\n\n%s\n\n" % (d, feedback)
else:
yield "Update of %s failed...\n\n%s" % (d, feedback)
for plugin in self._bot.plugin_manager.getAllPlugins():
if plugin.path.startswith(d) and hasattr(plugin, 'is_activated') and plugin.is_activated:
name = plugin.name
yield '/me is reloading plugin %s' % name
try:
self._bot.plugin_manager.reload_plugin_by_name(plugin.name)
yield "Plugin %s reloaded." % plugin.name
except PluginActivationException as pae:
yield 'Error reactivating plugin %s: %s' % (plugin.name, pae)
yield "Done."
@botcmd(split_args_with=' ', admin_only=True)
def plugin_config(self, _, args):
""" configure or get the configuration / configuration template for a specific plugin
i.e.
!plugin config ExampleBot
could return a template if it is not configured:
{'LOGIN': 'example@example.com', 'PASSWORD': 'password', 'DIRECTORY': '/toto'}
Copy, paste and adapt it so you can configure the plugin:
!plugin config ExampleBot {'LOGIN': 'my@email.com', 'PASSWORD': 'myrealpassword', 'DIRECTORY': '/tmp'}
It will then reload the plugin with this config.
You can at any moment retrieve the current values:
!plugin config ExampleBot
should return:
{'LOGIN': 'my@email.com', 'PASSWORD': 'myrealpassword', 'DIRECTORY': '/tmp'}
"""
plugin_name = args[0]
if self._bot.plugin_manager.is_plugin_blacklisted(plugin_name):
return 'Load this plugin first with ' + self._bot.prefix + 'load %s' % plugin_name
obj = self._bot.plugin_manager.get_plugin_obj_by_name(plugin_name)
if obj is None:
return 'Unknown plugin or the plugin could not load %s' % plugin_name
template_obj = obj.get_configuration_template()
if template_obj is None:
return 'This plugin is not configurable.'
if len(args) == 1:
response = ("Default configuration for this plugin (you can copy and paste "
"this directly as a command):\n\n"
"```\n{prefix}plugin config {plugin_name} \n{config}\n```").format(
prefix=self._bot.prefix, plugin_name=plugin_name, config=pformat(template_obj))
current_config = self._bot.plugin_manager.get_plugin_configuration(plugin_name)
if current_config:
response += ("\n\nCurrent configuration:\n\n"
"```\n{prefix}plugin config {plugin_name} \n{config}\n```").format(
prefix=self._bot.prefix, plugin_name=plugin_name, config=pformat(current_config))
return response
# noinspection PyBroadException
try:
real_config_obj = literal_eval(' '.join(args[1:]))
except Exception:
self.log.exception('Invalid expression for the configuration of the plugin')
return 'Syntax error in the given configuration'
if type(real_config_obj) != type(template_obj):
return 'It looks fishy: your config type is not the same as the template!'
self._bot.plugin_manager.set_plugin_configuration(plugin_name, real_config_obj)
try:
self._bot.plugin_manager.deactivate_plugin(plugin_name)
except PluginActivationException as pae:
return 'Error deactivating %s: %s' % (plugin_name, pae)
try:
self._bot.plugin_manager.activate_plugin(plugin_name)
except PluginConfigurationException as ce:
self.log.debug('Invalid configuration for the plugin, reverting the plugin to unconfigured.')
self._bot.plugin_manager.set_plugin_configuration(plugin_name, None)
return 'Incorrect plugin configuration: %s' % ce
except PluginActivationException as pae:
return 'Error activating plugin: %s' % pae
return 'Plugin configuration done.'
def formatted_plugin_list(self, active_only=True):
"""
Return a formatted, plain-text list of loaded plugins.
When active_only=True, this will only return plugins which
are actually active. Otherwise, it will also include inactive
(blacklisted) plugins.
"""
if active_only:
all_plugins = self._bot.plugin_manager.get_all_active_plugin_names()
else:
all_plugins = self._bot.plugin_manager.get_all_plugin_names()

#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_switch_interface
short_description: Configure software switch interfaces by grouping physical and WiFi interfaces in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system feature and switch_interface category.
Examples include all parameters and values which need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
system_switch_interface:
description:
- Configure software switch interfaces by grouping physical and WiFi interfaces.
default: null
type: dict
suboptions:
intra_switch_policy:
description:
- Allow any traffic between switch interfaces or require firewall policies to allow traffic between switch interfaces.
type: str
choices:
- implicit
- explicit
member:
description:
- Names of the interfaces that belong to the virtual switch.
type: list
suboptions:
interface_name:
description:
- Physical interface name. Source system.interface.name.
type: str
name:
description:
- Interface name (name cannot be in use by any other interfaces, VLANs, or inter-VDOM links).
required: true
type: str
span:
description:
- Enable/disable port spanning. Port spanning echoes traffic received by the software switch to the span destination port.
type: str
choices:
- disable
- enable
span_dest_port:
description:
- SPAN destination port name. All traffic on the SPAN source ports is echoed to the SPAN destination port. Source system.interface.name.
type: str
span_direction:
description:
- "The direction in which the SPAN port operates, either: rx, tx, or both."
type: str
choices:
- rx
- tx
- both
span_source_port:
description:
- Physical interface name. Port spanning echoes all traffic on the SPAN source ports to the SPAN destination port.
type: list
suboptions:
interface_name:
description:
- Physical interface name. Source system.interface.name.
type: str
type:
description:
- "Type of switch based on functionality: switch for normal functionality, or hub to duplicate packets to all port members."
type: str
choices:
- switch
- hub
vdom:
description:
- VDOM that the software switch belongs to. Source system.vdom.name.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure software switch interfaces by grouping physical and WiFi interfaces.
fortios_system_switch_interface:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
system_switch_interface:
intra_switch_policy: "implicit"
member:
-
interface_name: "<your_own_value> (source system.interface.name)"
name: "default_name_6"
span: "disable"
span_dest_port: "<your_own_value> (source system.interface.name)"
span_direction: "rx"
span_source_port:
-
interface_name: "<your_own_value> (source system.interface.name)"
type: "switch"
vdom: "<your_own_value> (source system.vdom.name)"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_system_switch_interface_data(json):
option_list = ['intra_switch_policy', 'member', 'name',
'span', 'span_dest_port', 'span_direction',
'span_source_port', 'type', 'vdom']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
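# For example, underscore_to_hyphen({'intra_switch_policy': 'implicit',
# 'span_dest_port': 'port1'}) returns
# {'intra-switch-policy': 'implicit', 'span-dest-port': 'port1'},
# i.e. the hyphenated key names the FortiOS API expects.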
def system_switch_interface(data, fos):
vdom = data['vdom']
state = data['state']
system_switch_interface_data = data['system_switch_interface']
filtered_data = underscore_to_hyphen(filter_system_switch_interface_data(system_switch_interface_data))
if state == "present":
return fos.set('system',
'switch-interface',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('system',
'switch-interface',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system(data, fos):
if data['system_switch_interface']:
resp = system_switch_interface(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"system_switch_interface": {
"required": False, "type": "dict", "default": None,
"options": {
"intra_switch_policy": {"required": False, "type": "str",
"choices": ["implicit", "explicit"]},
"member": {"required": False, "type": "list",
"options": {
"interface_name": {"required": False, "type": "str"}
}},
"name": {"required": True, "type": "str"},
"span": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"span_dest_port": {"required": False, "type": "str"},
"span_direction": {"required": False, "type": "str",
"choices": ["rx", "tx", "both"]},
"span_source_port": {"required": False, "type": "list",
"options": {
"interface_name": {"required": False, "type": "str"}
}},
"type": {"required": False, "type": "str",
"choices": ["switch", "hub"]},
| "vdom": {"required": False, "type": "str"} | 1,164 | lcc_e | python | null | 4dd6d387cd79be7f92c54f193f47ae6ab681a0ecc6ae3dba |
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: postgresql_publication
short_description: Add, update, or remove PostgreSQL publication
description:
- Add, update, or remove PostgreSQL publication.
version_added: "2.9"
options:
name:
description:
- Name of the publication to add, update, or remove.
required: true
type: str
db:
description:
- Name of the database to connect to and where
the publication state will be changed.
aliases: [ login_db ]
type: str
tables:
description:
- List of tables to add to the publication.
- If no value is set all tables are targeted.
- If the publication already exists for specific tables and I(tables) is not passed,
nothing will be changed. If you need to add all tables to the publication with the same name,
drop the existing publication and create a new one without passing I(tables).
type: list
state:
description:
- The publication state.
default: present
choices: [ absent, present ]
type: str
parameters:
description:
- Dictionary with optional publication parameters.
- Available parameters depend on PostgreSQL version.
type: dict
owner:
description:
- Publication owner.
- If I(owner) is not defined, the owner will be set as I(login_user) or I(session_role).
type: str
cascade:
description:
- Drop publication dependencies. Has effect with I(state=absent) only.
type: bool
default: false
notes:
- PostgreSQL version must be 10 or greater.
seealso:
- name: CREATE PUBLICATION reference
description: Complete reference of the CREATE PUBLICATION command documentation.
link: https://www.postgresql.org/docs/current/sql-createpublication.html
- name: ALTER PUBLICATION reference
description: Complete reference of the ALTER PUBLICATION command documentation.
link: https://www.postgresql.org/docs/current/sql-alterpublication.html
- name: DROP PUBLICATION reference
description: Complete reference of the DROP PUBLICATION command documentation.
link: https://www.postgresql.org/docs/current/sql-droppublication.html
author:
- Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
- Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
extends_documentation_fragment:
- postgres
'''
EXAMPLES = r'''
- name: Create a new publication with name "acme" targeting all tables in database "test".
postgresql_publication:
db: test
name: acme
- name: Create publication "acme" publishing only prices and vehicles tables.
postgresql_publication:
name: acme
tables:
- prices
- vehicles
- name: >
Create publication "acme", set user alice as an owner, targeting all tables.
Allowable DML operations are INSERT and UPDATE only
postgresql_publication:
name: acme
owner: alice
parameters:
publish: 'insert,update'
- name: >
Assuming publication "acme" exists and there are targeted
tables "prices" and "vehicles", add table "stores" to the publication.
postgresql_publication:
name: acme
tables:
- prices
- vehicles
- stores
- name: Remove publication "acme" if exists in database "test".
postgresql_publication:
db: test
name: acme
state: absent
'''
RETURN = r'''
exists:
description:
- Flag indicates the publication exists or not at the end of runtime.
returned: always
type: bool
sample: true
queries:
description: List of executed queries.
returned: always
type: str
sample: [ 'DROP PUBLICATION "acme" CASCADE' ]
owner:
description: Owner of the publication at the end of runtime.
returned: if publication exists
type: str
sample: "alice"
tables:
description:
- List of tables in the publication at the end of runtime.
- If all tables are published, returns empty list.
returned: if publication exists
type: list
sample: ["\"public\".\"prices\"", "\"public\".\"vehicles\""]
alltables:
description:
- Flag indicates that all tables are published.
returned: if publication exists
type: bool
sample: false
parameters:
description: Publication parameters at the end of runtime.
returned: if publication exists
type: dict
sample: {'publish': {'insert': false, 'delete': false, 'update': true}}
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.database import pg_quote_identifier
from ansible.module_utils.postgres import (
connect_to_db,
exec_sql,
get_conn_params,
postgres_common_argument_spec,
)
from ansible.module_utils.six import iteritems
SUPPORTED_PG_VERSION = 10000
################################
# Module functions and classes #
################################
def transform_tables_representation(tbl_list):
"""Add 'public.' to names of tables where a schema identifier is absent
and add quotes to each element.
Args:
tbl_list (list): List of table names.
Returns:
tbl_list (list): Changed list.
"""
for i, table in enumerate(tbl_list):
if '.' not in table:
tbl_list[i] = pg_quote_identifier('public.%s' % table.strip(), 'table')
else:
tbl_list[i] = pg_quote_identifier(table.strip(), 'table')
return tbl_list
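# For example, transform_tables_representation(['prices', 'acme.vehicles'])
# returns ['"public"."prices"', '"acme"."vehicles"'].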
class PgPublication():
"""Class to work with PostgreSQL publication.
Args:
module (AnsibleModule): Object of AnsibleModule class.
cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
name (str): The name of the publication.
Attributes:
module (AnsibleModule): Object of AnsibleModule class.
cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
name (str): Name of the publication.
executed_queries (list): List of executed queries.
attrs (dict): Dict with publication attributes.
exists (bool): Flag indicates the publication exists or not.
"""
def __init__(self, module, cursor, name):
self.module = module
self.cursor = cursor
self.name = name
self.executed_queries = []
self.attrs = {
'alltables': False,
'tables': [],
'parameters': {},
'owner': '',
}
self.exists = self.check_pub()
def get_info(self):
"""Refresh the publication information.
Returns:
``self.attrs``.
"""
self.exists = self.check_pub()
return self.attrs
def check_pub(self):
"""Check the publication and refresh ``self.attrs`` publication attribute.
Returns:
True if the publication with ``self.name`` exists, False otherwise.
"""
pub_info = self.__get_general_pub_info()
if not pub_info:
# Publication does not exist:
return False
self.attrs['owner'] = pub_info.get('pubowner')
# Publication DML operations:
self.attrs['parameters']['publish'] = {}
self.attrs['parameters']['publish']['insert'] = pub_info.get('pubinsert', False)
self.attrs['parameters']['publish']['update'] = pub_info.get('pubupdate', False)
self.attrs['parameters']['publish']['delete'] = pub_info.get('pubdelete', False)
if pub_info.get('pubtruncate'):
self.attrs['parameters']['publish']['truncate'] = pub_info.get('pubtruncate')
# If alltables flag is False, get the list of targeted tables:
if not pub_info.get('puballtables'):
table_info = self.__get_tables_pub_info()
# Join sublists [['schema', 'table'], ...] to ['schema.table', ...]
# for better representation:
for i, schema_and_table in enumerate(table_info):
table_info[i] = pg_quote_identifier('.'.join(schema_and_table), 'table')
self.attrs['tables'] = table_info
else:
self.attrs['alltables'] = True
# Publication exists:
return True
def create(self, tables, params, owner, check_mode=True):
"""Create the publication.
Args:
tables (list): List with names of the tables that need to be added to the publication.
params (dict): Dict contains optional publication parameters and their values.
owner (str): Name of the publication owner.
Kwargs:
check_mode (bool): If True, don't actually change anything,
just make SQL, add it to ``self.executed_queries`` and return True.
Returns:
changed (bool): True if publication has been created, otherwise False.
"""
changed = True
query_fragments = ["CREATE PUBLICATION %s" % pg_quote_identifier(self.name, 'publication')]
if tables:
query_fragments.append("FOR TABLE %s" % ', '.join(tables))
else:
query_fragments.append("FOR ALL TABLES")
if params:
params_list = []
# Make list ["param = 'value'", ...] from params dict:
for (key, val) in iteritems(params):
params_list.append("%s = '%s'" % (key, val))
# Add the list to query_fragments:
query_fragments.append("WITH (%s)" % ', '.join(params_list))
changed = self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
if owner:
# If check_mode, just add possible SQL to
# executed_queries and return:
self.__pub_set_owner(owner, check_mode=check_mode)
return changed
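# A hypothetical example of the SQL this builds (identifiers and values are
# illustrative): create(['"public"."prices"'], {'publish': 'insert'}, None)
# would execute roughly
#   CREATE PUBLICATION "acme" FOR TABLE "public"."prices" WITH (publish = 'insert')
# assuming self.name is 'acme'.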
def update(self, tables, params, owner, check_mode=True):
"""Update the publication.
Args:
tables (list): List with names of the tables that need to be presented in the publication.
params (dict): Dict contains optional publication parameters and their values.
owner (str): Name of the publication owner.
Kwargs:
check_mode (bool): If True, don't actually change anything,
just make SQL, add it to ``self.executed_queries`` and return True.
Returns:
changed (bool): True if publication has been updated, otherwise False.
"""
changed = False
# Add or drop tables from published tables suit:
if tables and not self.attrs['alltables']:
# 1. If needs to add table to the publication:
for tbl in tables:
if tbl not in self.attrs['tables']:
# If needs to add table to the publication:
changed = self.__pub_add_table(tbl, check_mode=check_mode)
# 2. if there is a table in the targeted tables
# that's not present in the passed tables:
for tbl in self.attrs['tables']:
if tbl not in tables:
changed = self.__pub_drop_table(tbl, check_mode=check_mode)
elif tables and self.attrs['alltables']:
changed = self.__pub_set_tables(tables, check_mode=check_mode)
# Update pub parameters:
if params:
for key, val in iteritems(params):
if self.attrs['parameters'].get(key):
# In PostgreSQL 10/11, only the 'publish' optional parameter is present.
if key == 'publish':
# 'publish' value can be only a string with comma-separated items
# of allowed DML operations like 'insert,update' or
# 'insert,update,delete', etc.
# Make dictionary to compare with current attrs later:
val_dict = self.attrs['parameters']['publish'].copy()
val_list = val.split(',')
for v in val_dict:
if v in val_list:
val_dict[v] = True
else:
val_dict[v] = False
# Compare val_dict and the dict with current 'publish' parameters,
# if they're different, set new values:
if val_dict != self.attrs['parameters']['publish']:
changed = self.__pub_set_param(key, val, check_mode=check_mode)
# Default behavior for other cases:
elif self.attrs['parameters'][key] != val:
changed = self.__pub_set_param(key, val, check_mode=check_mode)
else:
# If the parameter was not set before:
changed = self.__pub_set_param(key, val, check_mode=check_mode)
# Update pub owner:
if owner:
if owner != self.attrs['owner']:
changed = self.__pub_set_owner(owner, check_mode=check_mode)
return changed
def drop(self, cascade=False, check_mode=True):
"""Drop the publication.
Kwargs:
cascade (bool): Flag indicates that publication needs to be deleted
with its dependencies.
check_mode (bool): If True, don't actually change anything,
just make SQL, add it to ``self.executed_queries`` and return True.
Returns:
changed (bool): True if publication has been updated, otherwise False.
"""
if self.exists:
query_fragments = []
query_fragments.append("DROP PUBLICATION %s" % pg_quote_identifier(self.name, 'publication'))
if cascade:
query_fragments.append("CASCADE")
return self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
def __get_general_pub_info(self):
"""Get and return general publication information.
Returns:
Dict with publication information if successful, False otherwise.
"""
# Check pg_publication.pubtruncate exists (supported from PostgreSQL 11):
pgtrunc_sup = exec_sql(self, ("SELECT 1 FROM information_schema.columns "
"WHERE table_name = 'pg_publication' "
"AND column_name = 'pubtruncate'"), add_to_executed=False)
if pgtrunc_sup:
query = ("SELECT r.rolname AS pubowner, p.puballtables, p.pubinsert, "
"p.pubupdate , p.pubdelete, p.pubtruncate FROM pg_publication AS p "
"JOIN pg_catalog.pg_roles AS r "
"ON p.pubowner = r.oid "
"WHERE p.pubname = '%s'" % self.name)
else:
query = ("SELECT r.rolname AS pubowner, p.puballtables, p.pubinsert, "
"p.pubupdate , p.pubdelete FROM pg_publication AS p "
"JOIN pg_catalog.pg_roles AS r "
"ON p.pubowner = r.oid "
"WHERE p.pubname = '%s'" % self.name)
result = exec_sql(self, query, add_to_executed=False)
if result:
return result[0]
else:
return False
def __get_tables_pub_info(self):
"""Get and return tables that are published by the publication.
Returns:
List of dicts with published tables.
"""
query = ("SELECT schemaname, tablename "
"FROM pg_publication_tables WHERE pubname = '%s'" % self.name)
return exec_sql(self, query, add_to_executed=False)
def __pub_add_table(self, table, check_mode=False):
"""Add a table to the publication.
Args:
table (str): Table name.
Kwargs:
check_mode (bool): If True, don't actually change anything,
just make SQL, add it to ``self.executed_queries`` and return True.
Returns:
True if successful, False otherwise.
"""
query = ("ALTER PUBLICATION %s ADD TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
pg_quote_identifier(table, 'table')))
return self.__exec_sql(query, check_mode=check_mode)
def __pub_drop_table(self, table, check_mode=False):
"""Drop a table from the publication.
Args:
table (str): Table name.
Kwargs:
check_mode (bool): If True, don't actually change anything,
just make SQL, add it to ``self.executed_queries`` and return True.
Returns:
True if successful, False otherwise.
"""
query = ("ALTER PUBLICATION %s DROP TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
pg_quote_identifier(table, 'table')))
return self.__exec_sql(query, check_mode=check_mode)
def __pub_set_tables(self, tables, check_mode=False):
"""Set a table suit that need to be published by the publication.
Args:
tables (list): List of tables.
Kwargs:
check_mode (bool): If True, don't actually change anything,
just make SQL, add it to ``self.executed_queries`` and return True.
Returns:
True if successful, False otherwise.
"""
quoted_tables = [pg_quote_identifier(t, 'table') for t in tables]
query = ("ALTER PUBLICATION %s SET TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
', '.join(quoted_tables)))
return self.__exec_sql(query, check_mode=check_mode)
def __pub_set_param(self, param, value, check_mode=False):
"""Set an optional publication parameter.
Args:
param (str): Name of the parameter.
value (str): Parameter value.
Kwargs:
check_mode (bool): If True, don't actually change anything,
just make SQL, add it to ``self.executed_queries`` and return True.
Returns:
True if successful, False otherwise.
"""
query = ("ALTER PUBLICATION %s SET (%s = '%s')" % (pg_quote_identifier(self.name, 'publication'),
param, value))
return self.__exec_sql(query, check_mode=check_mode)
def __pub_set_owner(self, role, check_mode=False):
"""Set a publication owner.
Args:
role (str): Role (user) name that needs to be set as a publication owner.
Kwargs:
check_mode (bool): If True, don't actually change anything,
just make SQL, add it to ``self.executed_queries`` and return True.
Returns:
True if successful, False otherwise.
"""
query = ("ALTER PUBLICATION %s OWNER TO %s" % (pg_quote_identifier(self.name, 'publication'),
pg_quote_identifier(role, 'role')))
return self.__exec_sql(query, check_mode=check_mode)
def __exec_sql(self, query, check_mode=False):
"""Execute SQL query.
Note: If we need just to get information from the database,
we use ``exec_sql`` function directly.
Args:
query (str): Query that needs to be executed.
Kwargs:
check_mode (bool): If True, don't actually change anything,
just add ``query`` to ``self.executed_queries`` and return True.
Returns:
True if successful, False otherwise.
"""
if check_mode:
self.executed_queries.append(query)
return True
else:
return exec_sql(self, query, ddl=True)
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
name=dict(required=True),
db=dict(type='str', aliases=['login_db']),
state=dict(type='str', default='present', choices=['absent', 'present']),
tables=dict(type='list'),
parameters=dict(type='dict'),
owner=dict(type='str'),
cascade=dict(type='bool', default=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
# Parameters handling:
name = module.params['name']
state = module.params['state']
tables = module.params['tables']
params = module.params['parameters']
owner = module.params['owner']
cascade = module.params['cascade']
if state == 'absent':
if tables:
module.warn('parameter "tables" is ignored when "state=absent"')
if params:
module.warn('parameter "parameters" is ignored when "state=absent"')
if owner:
module.warn('parameter "owner" is ignored when "state=absent"')
if state == 'present' and cascade:
module.warn('parameter "cascade" is ignored when "state=present"')
# Connect to DB and make cursor object:
conn_params = get_conn_params(module, module.params)
# We check publication state without DML queries execution, so set autocommit:
db_connection = connect_to_db(module, conn_params, autocommit=True)
cursor = db_connection.cursor(cursor_factory=DictCursor)
# Check version:
if cursor.connection.server_version < SUPPORTED_PG_VERSION:
module.fail_json(msg="PostgreSQL server version should be 10.0 or greater")
# Nothing was changed by default:
changed = False
###################################
# Create object and do rock'n'roll:
publication = PgPublication(module, cursor, name)
if tables:
tables = transform_tables_representation(tables)
# If module.check_mode=True, nothing will be changed:
if state == 'present':

"""This tutorial introduces restricted boltzmann machines (RBM) using Theano.
Boltzmann Machines (BMs) are a particular form of energy-based model which
contain hidden variables. Restricted Boltzmann Machines further restrict BMs
to those without visible-visible and hidden-hidden connections.
"""
import timeit
try:
import PIL.Image as Image
except ImportError:
import Image
import numpy
import theano
import theano.tensor as T
import os
from theano.tensor.shared_randomstreams import RandomStreams
from utils import tile_raster_images
from logistic_sgd import load_data
# start-snippet-1
class RBM(object):
"""Restricted Boltzmann Machine (RBM) """
def __init__(
self,
input=None,
n_visible=784,
n_hidden=500,
W=None,
hbias=None,
vbias=None,
numpy_rng=None,
theano_rng=None
):
"""
RBM constructor. Defines the parameters of the model along with
basic operations for inferring hidden from visible (and vice-versa),
as well as for performing CD updates.
:param input: None for standalone RBMs or symbolic variable if RBM is
part of a larger graph.
:param n_visible: number of visible units
:param n_hidden: number of hidden units
:param W: None for standalone RBMs or symbolic variable pointing to a
shared weight matrix in case RBM is part of a DBN network; in a DBN,
the weights are shared between RBMs and layers of a MLP
:param hbias: None for standalone RBMs or symbolic variable pointing
to a shared hidden units bias vector in case RBM is part of a
different network
:param vbias: None for standalone RBMs or a symbolic variable
pointing to a shared visible units bias
"""
self.n_visible = n_visible
self.n_hidden = n_hidden
if numpy_rng is None:
# create a number generator
numpy_rng = numpy.random.RandomState(1234)
if theano_rng is None:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
if W is None:
# W is initialized with `initial_W`, which is uniformly
# sampled from -4*sqrt(6./(n_visible+n_hidden)) and
# 4*sqrt(6./(n_hidden+n_visible)); the output of uniform is
# converted using asarray to dtype theano.config.floatX so
# that the code is runnable on GPU
initial_W = numpy.asarray(
numpy_rng.uniform(
low=-4 * numpy.sqrt(6. / (n_hidden + n_visible)),
high=4 * numpy.sqrt(6. / (n_hidden + n_visible)),
size=(n_visible, n_hidden)
),
dtype=theano.config.floatX
)
# theano shared variables for weights and biases
W = theano.shared(value=initial_W, name='W', borrow=True)
if hbias is None:
# create shared variable for hidden units bias
hbias = theano.shared(
value=numpy.zeros(
n_hidden,
dtype=theano.config.floatX
),
name='hbias',
borrow=True
)
if vbias is None:
# create shared variable for visible units bias
vbias = theano.shared(
value=numpy.zeros(
n_visible,
dtype=theano.config.floatX
),
name='vbias',
borrow=True
)
# initialize input layer for standalone RBM or layer0 of DBN
self.input = input
if not input:
self.input = T.matrix('input')
self.W = W
self.hbias = hbias
self.vbias = vbias
self.theano_rng = theano_rng
# **** WARNING: It is not a good idea to put things in this list
# other than shared variables created in this function.
self.params = [self.W, self.hbias, self.vbias]
# end-snippet-1
def free_energy(self, v_sample):
''' Function to compute the free energy '''
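# For an RBM with visible bias b (vbias), hidden bias c (hbias) and weights W,
# the free energy of a visible configuration v is
#   F(v) = -b'v - sum_j log(1 + exp(c_j + (v W)_j))
# which is exactly what the vbias_term and hidden_term below compute.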
wx_b = T.dot(v_sample, self.W) + self.hbias
vbias_term = T.dot(v_sample, self.vbias)
hidden_term = T.sum(T.log(1 + T.exp(wx_b)), axis=1)
return -hidden_term - vbias_term
def propup(self, vis):
'''This function propagates the visible units activation upwards to
the hidden units
Note that we return also the pre-sigmoid activation of the
layer. As it will turn out later, due to how Theano deals with
optimizations, this symbolic variable will be needed to write
down a more stable computational graph (see details in the
reconstruction cost function)
'''
pre_sigmoid_activation = T.dot(vis, self.W) + self.hbias
return [pre_sigmoid_activation, T.nnet.sigmoid(pre_sigmoid_activation)]
def sample_h_given_v(self, v0_sample):
''' This function infers state of hidden units given visible units '''
# compute the activation of the hidden units given a sample of
# the visibles
pre_sigmoid_h1, h1_mean = self.propup(v0_sample)
# get a sample of the hiddens given their activation
# Note that theano_rng.binomial returns a symbolic sample of dtype
# int64 by default. If we want to keep our computations in floatX
# for the GPU we need to specify to return the dtype floatX
h1_sample = self.theano_rng.binomial(size=h1_mean.shape,
n=1, p=h1_mean,
dtype=theano.config.floatX)
return [pre_sigmoid_h1, h1_mean, h1_sample]
def propdown(self, hid):
'''This function propagates the hidden units activation downwards to
the visible units
Note that we return also the pre_sigmoid_activation of the
layer. As it will turn out later, due to how Theano deals with
optimizations, this symbolic variable will be needed to write
down a more stable computational graph (see details in the
reconstruction cost function)
'''
pre_sigmoid_activation = T.dot(hid, self.W.T) + self.vbias
return [pre_sigmoid_activation, T.nnet.sigmoid(pre_sigmoid_activation)]
def sample_v_given_h(self, h0_sample):
''' This function infers state of visible units given hidden units '''
# compute the activation of the visible given the hidden sample
pre_sigmoid_v1, v1_mean = self.propdown(h0_sample)
# get a sample of the visible given their activation
# Note that theano_rng.binomial returns a symbolic sample of dtype
# int64 by default. If we want to keep our computations in floatX
# for the GPU we need to specify to return the dtype floatX
v1_sample = self.theano_rng.binomial(size=v1_mean.shape,
n=1, p=v1_mean,
dtype=theano.config.floatX)
return [pre_sigmoid_v1, v1_mean, v1_sample]
def gibbs_hvh(self, h0_sample):
''' This function implements one step of Gibbs sampling,
starting from the hidden state'''
pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h0_sample)
pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v1_sample)
return [pre_sigmoid_v1, v1_mean, v1_sample,
pre_sigmoid_h1, h1_mean, h1_sample]
def gibbs_vhv(self, v0_sample):
''' This function implements one step of Gibbs sampling,
starting from the visible state'''
pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v0_sample)
pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h1_sample)
return [pre_sigmoid_h1, h1_mean, h1_sample,
pre_sigmoid_v1, v1_mean, v1_sample]
# start-snippet-2
def get_cost_updates(self, lr=0.1, persistent=None, k=1):
"""This functions implements one step of CD-k or PCD-k
:param lr: learning rate used to train the RBM
:param persistent: None for CD. For PCD, shared variable
containing old state of Gibbs chain. This must be a shared
variable of size (batch size, number of hidden units).
:param k: number of Gibbs steps to do in CD-k/PCD-k
Returns a proxy for the cost and the updates dictionary. The
dictionary contains the update rules for weights and biases but
also an update of the shared variable used to store the persistent
chain, if one is used.
"""
# compute positive phase
pre_sigmoid_ph, ph_mean, ph_sample = self.sample_h_given_v(self.input)
# decide how to initialize persistent chain:
# for CD, we use the newly generate hidden sample
# for PCD, we initialize from the old state of the chain
if persistent is None:
chain_start = ph_sample
else:
chain_start = persistent
# end-snippet-2
# perform actual negative phase
# in order to implement CD-k/PCD-k we need to scan over the
# function that implements one gibbs step k times.
# Read Theano tutorial on scan for more information :
# http://deeplearning.net/software/theano/library/scan.html
# the scan will return the entire Gibbs chain
(
[
pre_sigmoid_nvs,
nv_means,
nv_samples,
pre_sigmoid_nhs,
nh_means,
nh_samples
],
updates
) = theano.scan(
self.gibbs_hvh,
# the None are place holders, saying that
# chain_start is the initial state corresponding to the
# 6th output
outputs_info=[None, None, None, None, None, chain_start],
n_steps=k
)
# start-snippet-3
# determine gradients on RBM parameters
# note that we only need the sample at the end of the chain
chain_end = nv_samples[-1]
cost = T.mean(self.free_energy(self.input)) - T.mean(
self.free_energy(chain_end))
# We must not compute the gradient through the gibbs sampling
gparams = T.grad(cost, self.params, consider_constant=[chain_end])
# end-snippet-3 start-snippet-4
# constructs the update dictionary
for gparam, param in zip(gparams, self.params):
# make sure that the learning rate is of the right dtype
updates[param] = param - gparam * T.cast(
lr,
dtype=theano.config.floatX
)
if persistent:
# Note that this works only if persistent is a shared variable
updates[persistent] = nh_samples[-1]
# pseudo-likelihood is a better proxy for PCD
monitoring_cost = self.get_pseudo_likelihood_cost(updates)
else:
# reconstruction cross-entropy is a better proxy for CD
monitoring_cost = self.get_reconstruction_cost(updates,
pre_sigmoid_nvs[-1])
return monitoring_cost, updates
# end-snippet-4
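    # A minimal usage sketch: the (cost, updates) pair returned above is
    # typically compiled into a Theano training function.  The names `rbm`,
    # `index`, `x`, `train_set_x` and `batch_size` are assumed here, not
    # defined in this snippet.
    #
    #     cost, updates = rbm.get_cost_updates(lr=0.1, persistent=None, k=1)
    #     train_rbm = theano.function(
    #         [index], cost, updates=updates,
    #         givens={x: train_set_x[index * batch_size:(index + 1) * batch_size]})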
def get_pseudo_likelihood_cost(self, updates):
"""Stochastic approximation to the pseudo-likelihood"""
# index of bit i in expression p(x_i | x_{\i})
bit_i_idx = theano.shared(value=0, name='bit_i_idx')
# binarize the input image by rounding to nearest integer
xi = T.round(self.input)
# calculate free energy for the given bit configuration
fe_xi = self.free_energy(xi)
# flip bit x_i of matrix xi and preserve all other bits x_{\i}
# Equivalent to xi[:,bit_i_idx] = 1-xi[:, bit_i_idx], but assigns
# the result to xi_flip, instead of working in place on xi.
xi_flip = T.set_subtensor(xi[:, bit_i_idx], 1 - xi[:, bit_i_idx])
# calculate free energy with bit flipped
fe_xi_flip = self.free_energy(xi_flip)
        # equivalent to e^(-FE(x)) / (e^(-FE(x)) + e^(-FE(x_flip)))
cost = T.mean(self.n_visible * T.log(T.nnet.sigmoid(fe_xi_flip -
fe_xi)))
        # increment bit_i_idx (modulo n_visible) as part of updates
updates[bit_i_idx] = (bit_i_idx + 1) % self.n_visible
return cost
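    # What get_pseudo_likelihood_cost estimates: for binary units,
    # P(x_i | x_{\i}) = sigmoid(FE(x_flipped_i) - FE(x)), so the mean of its
    # log over one randomly chosen bit i, scaled by n_visible, is a stochastic
    # estimate of the full log pseudo-likelihood sum_i log P(x_i | x_{\i});
    # rotating bit_i_idx on each update cycles through all the visible bits.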
def get_reconstruction_cost(self, updates, pre_sigmoid_nv):
"""Approximation to the reconstruction error
Note that this function requires the pre-sigmoid activation as
input. To understand why this is so you need to understand a
bit about how Theano works. Whenever you compile a Theano
function, the computational graph that you pass as input gets
        optimized for speed and stability. This is done by replacing
        several parts of the subgraph with others. One such
        optimization expresses terms of the form log(sigmoid(x)) in
        terms of softplus. We need this optimization for the
        cross-entropy since the sigmoid of numbers larger than 30. (or
        even less than that) turns to 1. and numbers smaller than
        -30. turn to 0, which in turn will force Theano to compute
log(0) and therefore we will get either -inf or NaN as
cost. If the value is expressed in terms of softplus we do not
get this undesirable behaviour. This optimization usually
works fine, but here we have a special case. The sigmoid is
applied inside the scan op, while the log is
outside. Therefore Theano will only see log(scan(..)) instead
| of log(sigmoid(..)) and will not apply the wanted | 1,545 | lcc_e | python | null | 2f4b3c5fc9ed076b95ed19ce205e786f9d4793438d046e27 |
|
import subprocess
import cfg
import util
import shutil
import os
import time
import tempfile
import easywebdav
import filecmp
import threading
import Queue
import sys
import random
import hashlib
from nose.tools import with_setup
from nose.tools import assert_raises
#
# NOTE: following tests have been implemented using easywebdav instead of davfs2
#
pjoin = os.path.join
assert subprocess # pyflakes
assert cfg # pyflakes
@with_setup(util.setup_func, util.teardown_func)
def test_case_01_get():
full_name = pjoin(cfg.webdav_backend_directory, "foo")
subprocess.call(["dd","if=/dev/urandom",'of=%s' % full_name,"bs=100kB","count=10"],
stderr=util.FNULL)
    # could just as well be a list
    # I'm using a queue here to make sure it's thread safe
test_fails = Queue.Queue()
def download_foo(c):
try:
with tempfile.NamedTemporaryFile() as f:
c.download("foo",f)
f.flush()
assert filecmp.cmp(full_name, f.name)
except Exception:
test_fails.put( sys.exc_info() )
connections = [util.connect_easywebdav() for _ in range(100)]
ts = [threading.Thread(target=download_foo,args=(c,) )
for c in connections ]
for t in ts:
t.start()
for t in ts:
t.join()
try:
while True:
exc = test_fails.get(block=False)
raise exc[0], exc[1], exc[2]
except Queue.Empty:
pass
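# The try/except Queue.Empty block above re-raises, in the main thread, the
# first exception recorded by a worker (the Python 2 three-argument raise
# preserves the worker's original traceback).  The same idiom recurs in every
# test below; a hypothetical helper (a sketch only, not used by the tests)
# could factor it out:
def _reraise_worker_failure(failure_queue):
    """Re-raise the first exception a worker thread put on failure_queue."""
    try:
        exc = failure_queue.get(block=False)
    except Queue.Empty:
        return
    raise exc[0], exc[1], exc[2]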
@with_setup(util.setup_func, util.teardown_func)
def test_case_02_mkcol():
    # could just as well be a list
    # I'm using a queue here to make sure it's thread safe
test_fails = Queue.Queue()
def mkcol_foo(c):
try:
try:
c.mkdir("foo")
except easywebdav.OperationFailed:
pass
except Exception:
test_fails.put( sys.exc_info() )
connections = [util.connect_easywebdav() for _ in range(100)]
ts = [threading.Thread(target=mkcol_foo,args=(c,) )
for c in connections ]
for t in ts:
t.start()
for t in ts:
t.join()
try:
while True:
exc = test_fails.get(block=False)
raise exc[0], exc[1], exc[2]
except Queue.Empty:
pass
assert os.path.isdir( pjoin( cfg.webdav_backend_directory, "foo") )
@with_setup(util.setup_func, util.teardown_func)
def test_case_03_mkcol_different():
    # could just as well be a list
    # I'm using a queue here to make sure it's thread safe
test_fails = Queue.Queue()
def mkcol(c,name):
try:
c.mkdir(name)
except Exception:
test_fails.put( sys.exc_info() )
connections = [util.connect_easywebdav() for _ in range(100)]
ts = [threading.Thread(target=mkcol, args=(c,"foo%d" % i) )
for i,c in enumerate(connections) ]
for t in ts:
t.start()
for t in ts:
t.join()
try:
while True:
exc = test_fails.get(block=False)
raise exc[0], exc[1], exc[2]
except Queue.Empty:
pass
for i in range(100):
assert os.path.isdir( pjoin( cfg.webdav_backend_directory, "foo%d" %i) )
@with_setup(util.setup_func, util.teardown_func)
def test_case_04_put():
    # could just as well be a list
    # I'm using a queue here to make sure it's thread safe
test_fails = Queue.Queue()
checksums = Queue.Queue()
def put(c):
try:
with tempfile.TemporaryFile() as f:
rand_1mb = ''.join( [ chr( random.randint(0,100) ) for _ in range(1024*1024) ] )
f.write(rand_1mb)
f.seek(0)
c.upload(f, "foo")
m = hashlib.md5()
m.update( rand_1mb )
checksums.put( m.digest() )
except Exception:
test_fails.put( sys.exc_info() )
connections = [util.connect_easywebdav() for _ in range(10)]
ts = [threading.Thread(target=put, args=(c,) )
for c in connections ]
for t in ts:
t.start()
for t in ts:
t.join()
try:
while True:
exc = test_fails.get(block=False)
raise exc[0], exc[1], exc[2]
except Queue.Empty:
pass
full_name = pjoin( cfg.webdav_backend_directory, "foo" )
assert os.path.isfile( full_name )
with open(full_name, "rb") as f:
m = hashlib.md5()
m.update( f.read() )
expected_checksum = m.digest()
while True:
try:
checksum = checksums.get(block=False)
if checksum == expected_checksum:
break
except Queue.Empty:
assert False # file uploaded doesn't match any of the orig files!
@with_setup(util.setup_func, util.teardown_func)
def test_case_05_put_diff():
    # could just as well be a list
    # I'm using a queue here to make sure it's thread safe
test_fails = Queue.Queue()
checksums = Queue.Queue()
def put(c,name):
try:
with tempfile.TemporaryFile() as f:
rand_1mb = ''.join( [ chr( random.randint(0,100) ) for _ in range(1024*1024) ] )
f.write(rand_1mb)
f.seek(0)
c.upload(f, name)
m = hashlib.md5()
m.update( rand_1mb )
checksums.put( (name,m.digest()) )
except Exception:
test_fails.put( sys.exc_info() )
connections = [util.connect_easywebdav() for _ in range(10)]
ts = [threading.Thread(target=put, args=(c,"foo-%d" % i) )
for i,c in enumerate(connections) ]
for t in ts:
t.start()
for t in ts:
t.join()
try:
while True:
exc = test_fails.get(block=False)
raise exc[0], exc[1], exc[2]
except Queue.Empty:
pass
assert checksums.qsize() == 10
try:
while True:
| name, checksum = checksums.get(block=False) | 599 | lcc_e | python | null | 9e99b91db2d990fbd559f63ba39622bf1c97aaa7f105a287 |
|
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see
# <http://www.gnu.org/licenses/>.
"""
sibilant.pseudops.stack
Stack counting utilities for sibiliant pseudops blocks
author: Christopher O'Brien <obriencj@gmail.com>
license: LGPL v.3
"""
from . import Pseudop, Opcode
from ..lib import SibilantException
__all__ = (
"StackCounter", "StackUnderrun", "StackMismatch",
"stacker", "stacking",
"stacking_pop_args_push", "stacking_push_args_pop",
)
class StackUnderrun(SibilantException):
def __init__(self, stack_size, pop_size):
self.start_size = stack_size
self.pop_size = pop_size
msg = "stack size underrun, %i decremented by %i"
super().__init__(msg % (stack_size, pop_size))
class StackMismatch(SibilantException):
def __init__(self, label, label_size, jump_size):
self.label = label
self.label_size = label_size
self.jump_size = jump_size
msg = "mismatch between label %r stack: %i and origin stack: %i"
super().__init__(msg % (label, label_size, jump_size))
class MetaStackCounter(type):
def __init__(self, name, bases, members):
        # the _stackers_ member is merged together with those of
        # the bases, to provide a sort of inheritance.
stk = {}
for base in bases:
stk.update(getattr(base, "_stackers_", {}))
stk.update(members.get("_stackers_", {}))
# we also hunt for decorated callable members to inject into
# the _stackers_
for memb in members.values():
if callable(memb):
trop = getattr(memb, "_stacks_", None)
if trop is not None:
stk[trop] = memb
members["_stackers_"] = stk
self._stackers_ = stk
return super().__init__(name, bases, members)
def stacker(pseudop):
def decorator(member):
member._stacks_ = pseudop
return member
return decorator
def stacking(pushc, popc):
def pop_then_push(self, pseudop, args, push, pop):
if popc:
pop(popc)
if pushc:
push(pushc)
return pop_then_push
def stacking_pop_args_push(pushc):
def pop_args_then_push(self, pseudop, args, push, pop):
pop(args[0])
if pushc:
push(pushc)
return pop_args_then_push
def stacking_push_args_pop(popc):
def pop_then_push_args(self, pseudops, args, push, pop):
if popc:
pop(popc)
push(args[0])
return pop_then_push_args
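# Reading the helpers above: stacking(pushc, popc) yields a handler that pops
# `popc` values and then pushes `pushc` -- e.g. stacking(1, 2) below models a
# binary operator consuming two operands and producing one result -- while the
# *_args variants take one of the two counts from the pseudop's first argument
# (e.g. BUILD_TUPLE pops args[0] items and pushes one).  Methods decorated with
# @stacker(...) are collected by MetaStackCounter into the same _stackers_ map.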
class StackCounter(metaclass=MetaStackCounter):
_stackers_ = {
Pseudop.POSITION: stacking(0, 0),
Pseudop.DEL_VAR: stacking(0, 0),
Pseudop.DEL_GLOBAL: stacking(0, 0),
Pseudop.ROT_TWO: stacking(0, 0),
Pseudop.ROT_THREE: stacking(0, 0),
Pseudop.CONST: stacking(1, 0),
Pseudop.DUP: stacking(1, 0),
Pseudop.GET_VAR: stacking(1, 0),
Pseudop.GET_GLOBAL: stacking(1, 0),
Pseudop.BREAK_LOOP: stacking(1, 0),
Pseudop.FOR_ITER: stacking(1, 0),
Pseudop.IMPORT_FROM: stacking(1, 0),
Pseudop.CONTINUE_LOOP: stacking(1, 0),
Pseudop.LOAD_CELL: stacking(1, 0),
Pseudop.GET_ATTR: stacking(1, 1),
Pseudop.UNARY_POSITIVE: stacking(1, 1),
Pseudop.UNARY_NEGATIVE: stacking(1, 1),
Pseudop.UNARY_NOT: stacking(1, 1),
Pseudop.UNARY_INVERT: stacking(1, 1),
Pseudop.ITER: stacking(1, 1),
Pseudop.GET_YIELD_FROM_ITER: stacking(1, 1),
Pseudop.GET_AWAITABLE: stacking(1, 1),
Pseudop.YIELD_VAL: stacking(1, 1),
Pseudop.COMPARE_OP: stacking(1, 2),
Pseudop.GET_ITEM: stacking(1, 2),
Pseudop.BINARY_ADD: stacking(1, 2),
Pseudop.BINARY_SUBTRACT: stacking(1, 2),
Pseudop.BINARY_MULTIPLY: stacking(1, 2),
Pseudop.BINARY_MATRIX_MULTIPLY: stacking(1, 2),
Pseudop.BINARY_TRUE_DIVIDE: stacking(1, 2),
Pseudop.BINARY_FLOOR_DIVIDE: stacking(1, 2),
Pseudop.BINARY_POWER: stacking(1, 2),
Pseudop.BINARY_MODULO: stacking(1, 2),
Pseudop.BINARY_LSHIFT: stacking(1, 2),
Pseudop.BINARY_RSHIFT: stacking(1, 2),
Pseudop.BINARY_AND: stacking(1, 2),
Pseudop.BINARY_XOR: stacking(1, 2),
Pseudop.BINARY_OR: stacking(1, 2),
Pseudop.IMPORT_NAME: stacking(1, 2),
Pseudop.SET_ATTR: stacking(0, 2),
Pseudop.DEL_ITEM: stacking(0, 2),
Pseudop.SET_ITEM: stacking(0, 3),
Pseudop.POP: stacking(0, 1),
Pseudop.DEL_ATTR: stacking(0, 1),
Pseudop.SET_GLOBAL: stacking(0, 1),
Pseudop.SET_LOCAL: stacking(0, 1),
Pseudop.SET_VAR: stacking(0, 1),
Pseudop.RET_VAL: stacking(0, 1),
Pseudop.YIELD_FROM: stacking(1, 2),
Pseudop.SETUP_EXCEPT: stacking(6, 0),
Pseudop.POP_EXCEPT: stacking(0, 3),
Pseudop.SETUP_WITH: stacking(6, 0),
Pseudop.WITH_CLEANUP_START: stacking(7, 0),
Pseudop.WITH_CLEANUP_FINISH: stacking(0, 7),
Pseudop.SETUP_FINALLY: stacking(6, 0),
Pseudop.END_FINALLY: stacking(0, 6),
Pseudop.BUILD_LIST: stacking_pop_args_push(1),
Pseudop.BUILD_SET: stacking_pop_args_push(1),
Pseudop.BUILD_TUPLE: stacking_pop_args_push(1),
Pseudop.BUILD_TUPLE_UNPACK: stacking_pop_args_push(1),
Pseudop.BUILD_MAP_UNPACK: stacking_pop_args_push(1),
Pseudop.BUILD_SLICE: stacking_pop_args_push(1),
Pseudop.RAISE: stacking_pop_args_push(1),
Pseudop.FAUX_POP: stacking_pop_args_push(0),
Pseudop.UNPACK_SEQUENCE: stacking_push_args_pop(1),
Pseudop.FAUX_PUSH: stacking_push_args_pop(0),
}
# @stacker(Pseudop.LAMBDA)
# def stacks_lambda(self, pseudop, args, push, pop):
# pop(args[1])
# a = len(args[0].co_freevars)
# if a:
# push(a)
# pop(a)
# push(2)
# pop(2)
# push()
@stacker(Pseudop.BUILD_MAP)
def stacker_build_map(self, pseudop, args, push, pop):
pop(args[0] * 2)
push()
@stacker(Pseudop.UNPACK_EX)
def stacker_unpack_ex(self, pseudop, args, push, pop):
pop()
push(args[0] + args[1] + 1)
@stacker(Pseudop.SETUP_LOOP)
def stacker_setup_loop(self, pseudop, args, push, pop):
push(Opcode.SETUP_LOOP.stack_effect(1))
@stacker(Pseudop.POP_BLOCK)
def stacker_pop_block(self, pseudop, args, push, pop):
push(Opcode.POP_BLOCK.stack_effect())
@stacker(Pseudop.LABEL)
def stacker_label(self, pseudop, args, push, pop):
stac = self.stack_count
stac = self.labels.setdefault(args[0], stac)
self.stack_count = stac
@stacker(Pseudop.BLOCK)
def stacker_block(self, pseudop, args, push, pop):
block = args[0]
block_i, block_max = block.max_stack(self.compiler)
push(block_max)
pop(block_max)
@stacker(Pseudop.JUMP)
def stacker_jump(self, pseudop, args, push, pop):
stac = self.stack_count
| dest = args[0] | 655 | lcc_e | python | null | 087a12a2ee41ab7a59bdf67a92f3e49b77514c4b6dc1816e |
|
# Copyright (c) 2011-2015 Rusty Wagner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from BinaryData import *
from Structure import *
from HexEditor import *
from View import *
class ElfFile(BinaryAccessor):
def __init__(self, data):
self.data = data
self.valid = False
self.callbacks = []
self.symbols_by_name = {}
self.symbols_by_addr = {}
if not self.is_elf():
return
try:
self.tree = Structure(self.data)
self.header = self.tree.struct("ELF header", "header")
self.header.struct("ELF identification", "ident")
self.header.ident.uint32("magic")
self.header.ident.uint8("file_class")
self.header.ident.uint8("encoding")
self.header.ident.uint8("version")
self.header.ident.uint8("abi")
self.header.ident.uint8("abi_version")
self.header.ident.bytes(7, "pad")
self.header.uint16("type")
self.header.uint16("arch")
self.header.uint32("version")
self.symbol_table_section = None
self.dynamic_symbol_table_section = None
if self.header.ident.file_class == 1: # 32-bit
self.header.uint32("entry")
self.header.uint32("program_header_offset")
self.header.uint32("section_header_offset")
self.header.uint32("flags")
self.header.uint16("header_size")
self.header.uint16("program_header_size")
self.header.uint16("program_header_count")
self.header.uint16("section_header_size")
self.header.uint16("section_header_count")
self.header.uint16("string_table")
try:
self.sections = self.tree.array(self.header.section_header_count, "sections")
for i in range(0, self.header.section_header_count):
section = self.sections[i]
section.seek(self.header.section_header_offset + (i * 40))
section.uint32("name")
section.uint32("type")
section.uint32("flags")
section.uint32("addr")
section.uint32("offset")
section.uint32("size")
section.uint32("link")
section.uint32("info")
section.uint32("align")
section.uint32("entry_size")
if section.type == 2:
self.symbol_table_section = section
elif section.type == 11:
self.dynamic_symbol_table_section = section
except:
# Section headers are not required to load an ELF, skip errors
self.sections = self.tree.array(0, "sections")
pass
self.program_headers = self.tree.array(self.header.program_header_count, "programHeaders")
for i in range(0, self.header.program_header_count):
header = self.program_headers[i]
header.seek(self.header.program_header_offset + (i * 32))
header.uint32("type")
header.uint32("offset")
header.uint32("virtual_addr")
header.uint32("physical_addr")
header.uint32("file_size")
header.uint32("memory_size")
header.uint32("flags")
header.uint32("align")
# Parse symbol tables
self.symbols_by_name["_start"] = self.entry()
self.symbols_by_addr[self.entry()] = "_start"
try:
if self.symbol_table_section:
self.symbol_table = self.tree.array(self.symbol_table_section.size / 16, "Symbols", "symbols")
self.parse_symbol_table_32(self.symbol_table, self.symbol_table_section, self.sections[self.symbol_table_section.link])
if self.dynamic_symbol_table_section:
self.dynamic_symbol_table = self.tree.array(self.dynamic_symbol_table_section.size / 16, "Symbols", "symbols")
self.parse_symbol_table_32(self.dynamic_symbol_table, self.dynamic_symbol_table_section, self.sections[self.dynamic_symbol_table_section.link])
except:
# Skip errors in symbol table
pass
# Parse relocation tables
self.plt = {}
for section in self.sections:
if section.type == 9:
self.parse_reloc_32(section)
elif section.type == 4:
self.parse_reloca_32(section)
elif self.header.ident.file_class == 2: # 64-bit
self.header.uint64("entry")
self.header.uint64("program_header_offset")
self.header.uint64("section_header_offset")
self.header.uint32("flags")
self.header.uint16("header_size")
self.header.uint16("program_header_size")
self.header.uint16("program_header_count")
self.header.uint16("section_header_size")
self.header.uint16("section_header_count")
self.header.uint16("string_table")
try:
self.sections = self.tree.array(self.header.section_header_count, "sections")
for i in range(0, self.header.section_header_count):
section = self.sections[i]
section.seek(self.header.section_header_offset + (i * 64))
section.uint32("name")
section.uint32("type")
section.uint64("flags")
section.uint64("addr")
section.uint64("offset")
section.uint64("size")
section.uint32("link")
section.uint32("info")
section.uint64("align")
section.uint64("entry_size")
if section.type == 2:
self.symbol_table_section = section
elif section.type == 11:
self.dynamic_symbol_table_section = section
except:
# Section headers are not required to load an ELF, skip errors
self.sections = self.tree.array(0, "sections")
pass
self.program_headers = self.tree.array(self.header.program_header_count, "program_headers")
for i in range(0, self.header.program_header_count):
header = self.program_headers[i]
header.seek(self.header.program_header_offset + (i * 56))
header.uint32("type")
header.uint32("flags")
header.uint64("offset")
header.uint64("virtual_addr")
header.uint64("physical_addr")
header.uint64("file_size")
header.uint64("memory_size")
header.uint64("align")
# Parse symbol tables
self.symbols_by_name["_start"] = self.entry()
self.symbols_by_addr[self.entry()] = "_start"
try:
if self.symbol_table_section:
self.symbol_table = self.tree.array(self.symbol_table_section.size / 24, "Symbols", "symbols")
self.parse_symbol_table_64(self.symbol_table, self.symbol_table_section, self.sections[self.symbol_table_section.link])
if self.dynamic_symbol_table_section:
self.dynamic_symbol_table = self.tree.array(self.dynamic_symbol_table_section.size / 24, "Symbols", "symbols")
self.parse_symbol_table_64(self.dynamic_symbol_table, self.dynamic_symbol_table_section, self.sections[self.dynamic_symbol_table_section.link])
except:
# Skip errors in symbol table
pass
# Parse relocation tables
self.plt = {}
for section in self.sections:
if section.type == 9:
self.parse_reloc_64(section)
elif section.type == 4:
self.parse_reloca_64(section)
self.tree.complete()
self.valid = True
except:
self.valid = False
if self.valid:
self.data.add_callback(self)
def read_string_table(self, strings, offset):
end = strings.find("\x00", offset)
return strings[offset:end]
def parse_symbol_table_32(self, table, section, string_table):
strings = self.data.read(string_table.offset, string_table.size)
for i in range(0, section.size / 16):
table[i].seek(section.offset + (i * 16))
table[i].uint32("name_offset")
table[i].uint32("value")
table[i].uint32("size")
table[i].uint8("info")
table[i].uint8("other")
table[i].uint16("section")
table[i].name = self.read_string_table(strings, table[i].name_offset)
if len(table[i].name) > 0:
self.symbols_by_name[table[i].name] = table[i].value
self.symbols_by_addr[table[i].value] = table[i].name
def parse_symbol_table_64(self, table, section, string_table):
strings = self.data.read(string_table.offset, string_table.size)
for i in range(0, section.size / 24):
table[i].seek(section.offset + (i * 24))
table[i].uint32("name_offset")
table[i].uint8("info")
table[i].uint8("other")
table[i].uint16("section")
table[i].uint64("value")
table[i].uint64("size")
table[i].name = self.read_string_table(strings, table[i].name_offset)
if len(table[i].name) > 0:
self.symbols_by_name[table[i].name] = table[i].value
self.symbols_by_addr[table[i].value] = table[i].name
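        # The field order above mirrors the on-disk Elf64_Sym record (24 bytes):
        # st_name u32, st_info u8, st_other u8, st_shndx u16, st_value u64,
        # st_size u64 -- unlike the 32-bit Elf32_Sym (16 bytes), where value and
        # size precede info/other/section.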
def parse_reloc_32(self, section):
for i in range(0, section.size / 8):
ofs = self.data.read_uint32(section.offset + (i * 8))
info = self.data.read_uint32(section.offset + (i * 8) + 4)
sym = info >> 8
reloc_type = info & 0xff
if reloc_type == 7: # R_386_JUMP_SLOT
self.plt[ofs] = self.dynamic_symbol_table[sym].name
self.symbols_by_name[self.decorate_plt_name(self.dynamic_symbol_table[sym].name)] = ofs
self.symbols_by_addr[ofs] = self.decorate_plt_name(self.dynamic_symbol_table[sym].name)
def parse_reloca_32(self, section):
for i in range(0, section.size / 12):
ofs = self.data.read_uint32(section.offset + (i * 12))
info = self.data.read_uint32(section.offset + (i * 12) + 4)
sym = info >> 8
reloc_type = info & 0xff
if reloc_type == 7: # R_386_JUMP_SLOT
self.plt[ofs] = self.dynamic_symbol_table[sym].name
self.symbols_by_name[self.decorate_plt_name(self.dynamic_symbol_table[sym].name)] = ofs
self.symbols_by_addr[ofs] = self.decorate_plt_name(self.dynamic_symbol_table[sym].name)
def parse_reloc_64(self, section):
for i in range(0, section.size / 16):
ofs = self.data.read_uint64(section.offset + (i * 16))
info = self.data.read_uint64(section.offset + (i * 16) + 8)
sym = info >> 32
reloc_type = info & 0xff
if reloc_type == 7: # R_X86_64_JUMP_SLOT
self.plt[ofs] = self.dynamic_symbol_table[sym].name
self.symbols_by_name[self.decorate_plt_name(self.dynamic_symbol_table[sym].name)] = ofs
self.symbols_by_addr[ofs] = self.decorate_plt_name(self.dynamic_symbol_table[sym].name)
def parse_reloca_64(self, section):
for i in range(0, section.size / 24):
ofs = self.data.read_uint64(section.offset + (i * 24))
info = self.data.read_uint64(section.offset + (i * 24) + 8)
sym = info >> 32
reloc_type = info & 0xff
if reloc_type == 7: # R_X86_64_JUMP_SLOT
self.plt[ofs] = self.dynamic_symbol_table[sym].name
self.symbols_by_name[self.decorate_plt_name(self.dynamic_symbol_table[sym].name)] = ofs
self.symbols_by_addr[ofs] = self.decorate_plt_name(self.dynamic_symbol_table[sym].name)
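        # In the relocation parsers above, r_info packs the symbol index and
        # relocation type: for 32-bit ELF, sym = info >> 8 and type = info & 0xff;
        # for 64-bit ELF, sym = info >> 32 and the type sits in the low 32 bits
        # (masking with 0xff suffices here since only type 7, *_JUMP_SLOT, is
        # checked).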
def read(self, ofs, len):
result = ""
while len > 0:
cur = None
for i in self.program_headers:
if ((ofs >= i.virtual_addr) and (ofs < (i.virtual_addr + i.memory_size))) and (i.memory_size != 0):
cur = i
if cur == None:
break
prog_ofs = ofs - cur.virtual_addr
mem_len = cur.memory_size - prog_ofs
file_len = cur.file_size - prog_ofs
if mem_len > len:
mem_len = len
if file_len > len:
file_len = len
if file_len <= 0:
result += "\x00" * mem_len
len -= mem_len
ofs += mem_len
continue
result += self.data.read(cur.offset + prog_ofs, file_len)
len -= file_len
ofs += file_len
return result
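        # Note that segments whose memory_size exceeds file_size (e.g. .bss)
        # are zero-filled above, matching what the loader would map.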
def next_valid_addr(self, ofs):
result = -1
for i in self.program_headers:
if (i.virtual_addr >= ofs) and (i.memory_size != 0) and ((result == -1) or (i.virtual_addr < result)):
result = i.virtual_addr
return result
def get_modification(self, ofs, len):
result = []
while len > 0:
cur = None
for i in self.program_headers:
if ((ofs >= i.virtual_addr) and (ofs < (i.virtual_addr + i.memory_size))) and (i.memory_size != 0):
cur = i
if cur == None:
break
prog_ofs = ofs - cur.virtual_addr
mem_len = cur.memory_size - prog_ofs
file_len = cur.file_size - prog_ofs
if mem_len > len:
mem_len = len
if file_len > len:
file_len = len
if file_len <= 0:
| result += [DATA_ORIGINAL] * mem_len | 1,032 | lcc_e | python | null | 9d1ef6b62bf709d7490cbf236f15ee8ef609e6aff8c231cf |
|
"""Imports:
defaultdict: for multidimensional dictionaries (implicitly instantiating
the nested dictionaries as the dimensions are accessed)
operator: sorting dictionaries
os: writing the collection out to disk
"""
from collections import defaultdict
import operator
import os
class TrackCollection(object):
"""A structure sorting tracks by artist and album
Attributes:
file_count: int number of files in this collection
collection: 2-dimensional defaultdict of all tracks namespaced by artist
then album
"""
file_count = 0
def __init__(self):
def create_multidimensional_dict(n, dict_type):
"""Recursive function to create a multidimensional defaultdict
Args:
n: int dimensionality of the defaultdict
dict_type: type of the elements
Returns:
A multidimensional defaultdict"""
if n < 1:
return dict_type()
return defaultdict(lambda: create_multidimensional_dict(n-1, dict_type))
# Create one.
self.collection = create_multidimensional_dict(2, list)
def __str__(self):
s = "---- Album Dictionary Mappings ----\n"
# List artists and sort
artists = self.collection.keys()
artists.sort()
for artist in artists:
# List albums and sort
albums = self.collection[artist].keys()
albums.sort(key=lambda k2, k1=artist: self.collection[k1][k2][0].final.year)
for album in albums:
s += "[%s][%s]\n" % (artist, album)
for song in self.collection[artist][album]:
s += " %02d %s\n" % (song.final.track, song.final.title)
s += "-----------------------------------"
return s
def add(self, track):
"""Adds a TrackFile to the collection.
Args:
track: A TrackFile to add. Must be finalised at this point (for
indexing purposes).
Returns:
None
Raises:
Exception: The given track was not finalised.
"""
# TODO Compilations are going to wreak havoc here too, see note on
# remove_duplicates.
if not track.finalised:
raise Exception("TrackCollection cannot add a non-finalised track")
if not self.collection[track.final.artist][track.final.album]:
self.collection[track.final.artist][track.final.album] = []
self.collection[track.final.artist][track.final.album].append(track)
self.file_count += 1
def remove_duplicates(self, warnings=None, report_progress=None):
"""Look for duplicate songs and remove them.
        Duplicate artists and albums are not a problem in themselves - they can
        only come from a music file (itself a song) carrying that data, so you
either have duplicate songs, or different songs with slightly different
artist/album names. This should be corrected by the TrackFile's
compression step. If it wasn't properly picked up then (i.e. the names
were too dissimilar) we have no further information at this point so we
can't fix it.
Args:
report_progress: Optional two argument function to report progress
where the first argument is the total number of items and the
second argument is the completed number of items.
Returns:
None
"""
# TODO: Compilations are going to go crazy here... revist this later,
# probably with a TrackFile flag for (probable) compilation tracks.
processed_count = 0
for artist in self.collection:
for album in self.collection[artist]:
duplicate_tracker = {}
to_be_removed = []
for song in self.collection[artist][album]:
title = song.final.title
# If a track with this title already exists within this
# artist/album tuple, mark it as a duplicate (and optionally
# generate a warning
if title in duplicate_tracker:
duplicate = duplicate_tracker[title]
if warnings is not None and ( \
duplicate.final.track != song.final.track or \
duplicate.final.year != song.final.year):
warnings.append('Found songs with the same artist, ' \
'album and title but differing track or year:\n' \
' %s\n %s\n %s\n %s' % ( \
duplicate, duplicate.file_path, \
song, song.file_path))
to_be_removed.append(song)
else:
duplicate_tracker[title] = song
processed_count += 1
if report_progress:
report_progress(self.file_count, processed_count)
for song in to_be_removed:
self.collection[artist][album].remove(song)
self.file_count -= len(to_be_removed)
def standardise_album_tracks(self, warnings=None, report_progress=None):
"""Standardises track data between tracks within each album.
Takes a vote between tracks within albums to standardise information on
the album year and track numbering.
Args:
report_progress: Optional two argument function to report progress
where the first argument is the total number of items and the
second argument is the completed number of items.
Returns:
None
"""
# TODO Compilations are going to wreak havoc here too, see note on
# remove_duplicates.
processed_count = 0
for artist in self.collection:
for album in self.collection[artist]:
# First collect the number of times each different album year
# data appears. Ideally all tracks should have the same year.
album_year_votes = {}
for song in self.collection[artist][album]:
if song.final.year in album_year_votes:
album_year_votes[song.final.year] += 1
else:
album_year_votes[song.final.year] = 1
processed_count += 1
if report_progress:
report_progress(self.file_count, processed_count)
# If there is more than one album year listed, standardise. A
# good argument could be made for any number of strategies for
# standardising. Currently the majority vote takes it, but the
# latest 'sensible' year would also be an idea.
if len(album_year_votes.keys()) > 1:
sorted_album_year_votes = sorted(album_year_votes.iteritems(),
key=operator.itemgetter(1),
reverse=True)
if sorted_album_year_votes[0][0] != 0:
correct_year = sorted_album_year_votes[0][0]
else:
correct_year = sorted_album_year_votes[1][0]
if warnings is not None:
warnings.append('Multiple album years for %s ' \
'by %s: %s. Using %d.' \
% (album, artist, str(sorted_album_year_votes), correct_year))
for song in self.collection[artist][album]:
song.final.year = correct_year
def sort_songs_by_track(self):
"""Sort the songs in the lists by their track numbers.
Returns:
None
"""
for artist in self.collection:
for album in self.collection[artist]:
self.collection[artist][album].sort(key=lambda x: x.final.track)
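    # A minimal usage sketch of this class (the `tracks` name is assumed, not
    # defined in this module):
    #
    #     collection = TrackCollection()
    #     for track in tracks:            # finalised TrackFile objects
    #         collection.add(track)
    #     warnings = []
    #     collection.remove_duplicates(warnings=warnings)
    #     collection.standardise_album_tracks(warnings=warnings)
    #     collection.sort_songs_by_track()
    #     collection.create_new_filesystem('/path/to/new/library')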
def create_new_filesystem(self, new_path):
"""Creates a collection starting from a root directory.
Args:
new_path: The path to recursively search for the collection within.
Returns:
None
"""
os.mkdir(new_path)
for artist in self.collection:
artist_subpath = '/%s' % (artist)
os.mkdir(new_path + artist_subpath)
for album in self.collection[artist]:
if self.collection[artist][album][0].final.year != 0:
album_subpath = '/[%d] %s' % (self.collection[artist][album][0].final.year, album)
else:
album_subpath = '/%s' % (album)
os.mkdir(new_path + artist_subpath + album_subpath)
| for song in self.collection[artist][album]: | 826 | lcc_e | python | null | 50c8a624b72c93ccf1b9dc94cc56cefefc09ed423276260c |
|
# -*- coding: utf-8 -*-
########################## Copyrights and license ############################
# #
# Copyright 2011-2015 Christian Lupien <christian.lupien@usherbrooke.ca> #
# #
# This file is part of pyHegel. http://github.com/lupien/pyHegel #
# #
# pyHegel is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# pyHegel is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with pyHegel. If not, see <http://www.gnu.org/licenses/>. #
# #
##############################################################################
from __future__ import absolute_import
import numpy as np
import random
import time
from scipy.optimize import brentq as brentq_rootsolver
from .. import traces
from ..instruments_base import BaseInstrument, visaInstrument, visaInstrumentAsync,\
BaseDevice, scpiDevice, MemoryDevice, Dict_SubDevice, ReadvalDev,\
ChoiceBase, ChoiceMultiple, ChoiceMultipleDep, ChoiceSimpleMap,\
ChoiceStrings, ChoiceIndex,\
make_choice_list, _fromstr_helper,\
decode_float64, visa_wrap, locked_calling
from ..types import dict_improved
from ..instruments_registry import register_instrument, register_usb_name, register_idn_alias
from .logical import FunctionDevice
#######################################################
## Yokogawa source
#######################################################
register_usb_name('Yokogawa Electric Corporation', 0x0B21)
@register_instrument('YOKOGAWA', 'GS210', usb_vendor_product=[0x0B21, 0x0039])
#@register_instrument('YOKOGAWA', 'GS210', '1.05')
#@register_instrument('YOKOGAWA', 'GS210', '1.02')
class yokogawa_gs200(visaInstrument):
# TODO: implement multipliers, units. The multiplier
# should be the same for all instruments, and be stripped
# before writing and going to the cache (in BaseDevice)
# This is probably not needed. Just use 1e3
# case insensitive
multipliers = ['YO', 'ZE', 'EX', 'PE', 'T', 'G', 'MA', 'K', 'M', 'U', 'N', 'P',
'F', 'A', 'Z', 'Y']
multvals = [1e24, 1e21, 1e18, 1e15, 1e12, 1e9, 1e6, 1e3, 1e-3, 1e-6, 1e-9, 1e-12,
1e-15, 1e-18, 1e-21, 1e-24]
def init(self, full=False):
# clear event register, extended event register and error queue
self.clear()
def _current_config(self, dev_obj=None, options={}):
return self._conf_helper('function', 'range', 'level', 'output_en', options)
def _create_devs(self):
#self.level_2 = wrapDevice(self.levelsetdev, self.levelgetdev, self.levelcheck)
self.function = scpiDevice(':source:function', choices=ChoiceStrings('VOLT', 'CURRent')) # use 'voltage' or 'current'
# voltage or current means to add V or A in the string (possibly with multiplier)
self.range = scpiDevice(':source:range', str_type=float, setget=True) # can be a voltage, current, MAX, MIN, UP or DOWN
#self.level = scpiDevice(':source:level') # can be a voltage, current, MAX, MIN
self.voltlim = scpiDevice(':source:protection:voltage', str_type=float, setget=True) #voltage, MIN or MAX
self.currentlim = scpiDevice(':source:protection:current', str_type=float, setget=True) #current, MIN or MAX
self.output_en = scpiDevice('OUTPut', str_type=bool)
self._devwrap('level', setget=True)
self.alias = self.level
# This needs to be last to complete creation
super(type(self),self)._create_devs()
def _level_check(self, val):
rnge = 1.2*self.range.getcache()
if self.function.getcache()=='CURR' and rnge>.2:
rnge = .2
if abs(val) > rnge:
raise ValueError, self.perror('level is invalid')
def _level_getdev(self):
return float(self.ask(':source:level?'))
def _level_setdev(self, val):
# used %.6e instead of repr
# repr sometimes sends 0.010999999999999999
# which the yokogawa understands as 0.010 instead of 0.011
self.write(':source:level %.6e'%val)
#######################################################
## Stanford Research SR830 Lock-in Amplifier
#######################################################
#@register_instrument('Stanford_Research_Systems', 'SR830', 'ver1.07 ')
@register_instrument('Stanford_Research_Systems', 'SR830', alias='SR830 LIA')
class sr830_lia(visaInstrument):
"""
    Don't forget to set the async_wait to some useful value.
    You might do set(sr1.async_wait, 1.)
    when using a 24 dB/oct, 100 ms filter.
You can use find_n_time and find_fraction to set the time.
For example: set(sr1.async_wait, sr1.find_n_time(.99,sec=True))
To read more than one channel at a time use readval/fetch(snap)
    Otherwise you can use x, y, r, theta
"""
# TODO setup snapsel to use the names instead of the numbers
_snap_type = {1:'x', 2:'y', 3:'R', 4:'theta', 5:'Aux_in1', 6:'Aux_in2',
7:'Aux_in3', 8:'Aux_in4', 9:'Ref_Freq', 10:'Ch1', 11:'Ch2'}
def init(self, full=False):
# This empties the instrument buffers
self._dev_clear()
def _check_snapsel(self,sel):
if not (2 <= len(sel) <= 6):
            raise ValueError, 'snap sel needs at least 2 and no more than 6 elements'
def _snap_getdev(self, sel=[1,2]):
# sel must be a list
self._check_snapsel(sel)
sel = map(str, sel)
return decode_float64(self.ask('snap? '+','.join(sel)))
def _snap_getformat(self, sel=[1,2], **kwarg):
self._check_snapsel(sel)
headers = [ self._snap_type[i] for i in sel]
d = self.snap._format
d.update(multi=headers, graph=range(len(sel)))
return BaseDevice.getformat(self.snap, sel=sel, **kwarg)
def auto_offset(self, ch='x'):
"""
commands the auto offset for channel ch
which can be 'x', 'y' or 'r'
"""
choices=ChoiceIndex(['x', 'y', 'r'], offset=1)
ch_i = choices.tostr(ch)
self.write('aoff '+ch_i)
def _current_config(self, dev_obj=None, options={}):
#base = ['async_delay=%r'%self.async_delay]
return self._conf_helper('async_delay','async_wait', 'freq', 'sens', 'srclvl', 'harm', 'phase', 'timeconstant', 'filter_slope',
'sync_filter', 'reserve_mode',
'offset_expand_x', 'offset_expand_y', 'offset_expand_r',
'input_conf', 'grounded_conf', 'dc_coupled_conf', 'linefilter_conf', options)
def _create_devs(self):
self.freq = scpiDevice('freq', str_type=float, setget=True, min=0.001, max=102e3)
sens = ChoiceIndex(make_choice_list([2,5,10], -9, -1), normalize=True)
self.sens = scpiDevice('sens', choices=sens, doc='Set the sensitivity in V (for currents it is in uA)')
self.oauxi1 = scpiDevice(getstr='oaux? 1', str_type=float, setget=True)
self.srclvl = scpiDevice('slvl', str_type=float, min=0.004, max=5., setget=True)
self.harm = scpiDevice('harm', str_type=int, min=1, max=19999)
self.phase = scpiDevice('phas', str_type=float, min=-360., max=729.90, setget=True)
timeconstants = ChoiceIndex(make_choice_list([10, 30], -6, 3), normalize=True)
self.timeconstant = scpiDevice('oflt', choices=timeconstants)
filter_slopes=ChoiceIndex([6, 12, 18, 24])
self.filter_slope = scpiDevice('ofsl', choices=filter_slopes, doc='in dB/oct\n')
self.sync_filter = scpiDevice('sync', str_type=bool)
self.x = scpiDevice(getstr='outp? 1', str_type=float, trig=True)
self.y = scpiDevice(getstr='outp? 2', str_type=float, trig=True)
self.r = scpiDevice(getstr='outp? 3', str_type=float, trig=True)
off_exp = ChoiceMultiple(['offset_pct', 'expand_factor'], [float, ChoiceIndex([1, 10 ,100])])
self.offset_expand_x = scpiDevice('oexp 1,{val}', 'oexp? 1', choices=off_exp, setget=True)
self.offset_expand_y = scpiDevice('oexp 2,{val}', 'oexp? 2', choices=off_exp, setget=True)
self.offset_expand_r = scpiDevice('oexp 3,{val}', 'oexp? 3', choices=off_exp, setget=True)
self.theta = scpiDevice(getstr='outp? 4', str_type=float, trig=True)
input_conf = ChoiceIndex(['A', 'A-B', 'I1', 'I100'])
self.input_conf = scpiDevice('isrc', choices=input_conf, doc='For currents I1 refers to 1 MOhm, I100 refers to 100 MOhm\n')
self.grounded_conf = scpiDevice('ignd', str_type=bool)
self.dc_coupled_conf = scpiDevice('icpl', str_type=bool)
reserve_mode = ChoiceIndex(['high', 'normal', 'low'])
self.reserve_mode = scpiDevice('rmod', choices=reserve_mode)
linefilter = ChoiceIndex(['none', 'line', '2xline', 'both'])
self.linefilter_conf = scpiDevice('ilin', choices=linefilter, doc='Selects the notch filters')
# status: b0=Input/Reserver ovld, b1=Filter ovld, b2=output ovld, b3=unlock,
# b4=range change (accross 200 HZ, hysteresis), b5=indirect time constant change
# b6=triggered, b7=unused
self.status_byte = scpiDevice(getstr='LIAS?', str_type=int)
self._devwrap('snap', trig=True, doc="""
This device can be called snap or fetch (they are both the same)
This device obtains simultaneous readings from many inputs.
To select the inputs, use the parameter
sel
which is [1,2] by default.
        The numbers are taken from the following dictionary:
%r
"""%self._snap_type)
self.fetch = self.snap
self.readval = ReadvalDev(self.fetch)
self.alias = self.readval
# This needs to be last to complete creation
super(type(self),self)._create_devs()
def get_error(self):
"""
returns a byte of bit flags
bit 0 (1): unused
bit 1 (2): Backup error
bit 2 (4): RAM error
bit 3 (8): Unused
bit 4 (16): Rom error
bit 5 (32): GPIB error
bit 6 (64): DSP error
bit 7 (128): Math Error
"""
return int(self.ask('ERRS?'))
def find_fraction(self, n_time_constant, n_filter=None, time_constant=None, sec=False):
"""
Calculates the fraction of a step function that is obtained after
n_time_constant*time_constant time when using n_filter
n_filter is the order of the filter: 1, 2, 3 ...
By default time_constant and n_filter are the current ones
When sec is True the input time is in sec, not in time_constants
"""
if n_filter == None:
n_filter = self.filter_slope.getcache()
n_filter = self.filter_slope.choices.index(n_filter)+1
if time_constant == None:
time_constant = self.timeconstant.getcache()
if sec:
n_time_constant /= time_constant
t = n_time_constant
et = np.exp(-t)
if n_filter == 1:
return 1.-et
elif n_filter == 2:
return 1.-et*(1.+t)
# elif n_filter == 3:
# return 1.-et*(1.+t+0.5*t**2)
# elif n_filter == 4:
# return 1.-et*(1.+t+0.5*t**2+t**3/6.)
else:
            # general formula: 1-exp(-t)*(1 + t + t**2/2 + ... + t**(n-1)/(n-1)!)
m = 1.
tt = 1.
for i in range(1, n_filter):
tt *= t/i
m += tt
return 1.-et*m
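        # Rough numeric illustration: for a 4th-order filter (24 dB/oct),
        # find_fraction(5) = 1 - exp(-5)*(1 + 5 + 12.5 + 125/6.) which is about
        # 0.735, and reaching 99% of a step takes roughly 10 time constants
        # (which is what find_n_time below solves for).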
def find_n_time(self, frac=.99, n_filter=None, time_constant=None, sec=False):
"""
Does the inverse of find_fraction.
Here, given a fraction, we find the number of time_constants needed to wait.
        When sec is true, it returns the time in sec, not in number of time_constants.
"""
if n_filter == None:
n_filter = self.filter_slope.getcache()
n_filter = self.filter_slope.choices.index(n_filter)+1
if time_constant == None:
time_constant = self.timeconstant.getcache()
func = lambda x: self.find_fraction(x, n_filter, time_constant)-frac
n_time = brentq_rootsolver(func, 0, 100)
if sec:
return n_time*time_constant
else:
return n_time
#######################################################
## Stanford Research SR384 RF source
#######################################################
#@register_instrument('Stanford Research Systems', 'SG384', 'ver1.02.0E')
@register_instrument('Stanford Research Systems', 'SG384', alias='SG384 RF source')
class sr384_rf(visaInstrument):
    # This instrument needs to be in local state or to pass through local state
# after a local_lockout to actually turn off the local key.
# allowed units: amp: dBm, rms, Vpp; freq: GHz, MHz, kHz, Hz; Time: ns, us, ms, s
def init(self, full=False):
# This clears the error state
self.clear()
def _current_config(self, dev_obj=None, options={}):
return self._conf_helper('freq', 'en_lf', 'amp_lf_dbm', 'offset_low',
'en_rf', 'amp_rf_dbm', 'en_hf', 'amp_hf_dbm',
'phase', 'mod_en', options)
def _create_devs(self):
self.freq = scpiDevice('freq',str_type=float, min=1e-6, max=8.1e9)
self.offset_low = scpiDevice('ofsl',str_type=float, min=-1.5, max=+1.5) #volts
        self.amp_lf_dbm = scpiDevice('ampl',str_type=float, min=-47, max=14.96) # all channel output power calibrated to +13 dBm only, manual says 15.5 for low but instrument stops at 14.96
self.amp_rf_dbm = scpiDevice('ampr',str_type=float, min=-110, max=16.53)
self.amp_hf_dbm = scpiDevice('amph',str_type=float, min=-10, max=16.53) # doubler
self.en_lf = scpiDevice('enbl', str_type=bool) # 0 is off, 1 is on, read value depends on freq
self.en_rf = scpiDevice('enbr', str_type=bool) # 0 is off, 1 is on, read value depends on freq
self.en_hf = scpiDevice('enbh', str_type=bool) # 0 is off, 1 is on, read value depends on freq
self.phase = scpiDevice('phas',str_type=float, min=-360, max=360) # deg, only change by 360
self.mod_en = scpiDevice('modl', str_type=bool) # 0 is off, 1 is on
# This needs to be last to complete creation
super(type(self),self)._create_devs()
def get_error(self):
"""
Pops last error
## Execution Errors
0: No error
10: Illegal value
11: Illegal Mode
12: Not allowed
13: Recall Failed
14: No clock option
15: No RF doubler option
16: No IQ option
17: Failed self test
## Query Errors
30: Lost data
32: No listener
## Device dependent errors
40: Failed ROM check
42: Failed EEPROM check
43: Failed FPGA check
44: Failed SRAM check
45: Failed GPIB check
46: Failed LF DDS check
47: Failed RF DDS check
48: Failed 20 MHz PLL
49: Failed 100 MHz PLL
50: Failed 19 MHz PLL
51: Failed 1 GHz PLL
52: Failed 4 GHz PLL
53: Failed DAC
## Parsing errors
110: Illegal command
111: Undefined command
112: Illegal query
113: Illegal set
114: Null parameter
115: Extra parameters
116: Missing parameters
117: Parameter overflow
118: Invalid floating point number
120: Invalid Integer
121: Integer overflow
122: Invalid Hexadecimal
126: Syntax error
127: Illegal units
128: Missing units
## Communication errors
170: Communication error
171: Over run
## Other errors
254: Too many errors
"""
return int(self.ask('LERR?'))
#######################################################
## Stanford Research SR780 2 channel network analyzer
#######################################################
#@register_instrument('Stanford_Research_Systems', 'SR780', 'ver116')
@register_instrument('Stanford_Research_Systems', 'SR780', alias='SR780 network analyser')
class sr780_analyzer(visaInstrumentAsync):
"""
This controls a 2 channel network analyzer
It currently only handles the FFT measurement group (not octave or swept sine).
Markers are not handled. Only sine sources are handled.
Useful devices:
fetch, readval
dump
current_display
current_channel
freq_start, freq_stop, freq_center, freq_span
window_type
average_en
average_type
average_mode
average_count_requested
async_wait (needed for exponential average, not for linear)
Useful methods:
start
get_xscale
Changing a setup should be done in the following order
        meas_group
        meas
        meas_view
        meas_unit
"""
def __init__(self, *args, **kwargs):
super(sr780_analyzer, self).__init__(*args, **kwargs)
        # The parent __init__ overrides our selection of 'wait' mode
# in _async_detect_setup(reset=True) in init. So lets set it back
self._async_mode = 'wait'
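    # A usage sketch following the order recommended in the class docstring
    # (the `srnet` instance name and the particular choices are assumptions):
    #
    #     set(srnet.meas_group, 'FFT')
    #     set(srnet.meas, 'FFT1')
    #     set(srnet.meas_view, 'LogMag')
    #     set(srnet.meas_unit, 'dBVrms')
    #     data = get(srnet.readval)   # triggers an acquisition and reads it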
def init(self, full=False):
# This empties the instrument buffers
self._dev_clear()
# This clears the error state, and status/event flags
self.clear()
if full:
self._async_sre_flag = 2
self.write('DSPE 0;*sre 2') # Display flags
self._async_detect_setup(reset=True)
#self._async_tocheck = 0
#self._async_use_delay = False
self.visa.write_termination = '\n'
#self.visa.term_chars='\n'
# The above turned on detection of termchar on read. This is not good for
# raw reads so turn it off.
# visa.vpp43.set_attribute(self.visa.vi, visa.VI_ATTR_TERMCHAR_EN, visa.VI_FALSE)
self.write('OUTX 0') # Force interface to be on GPIB, in case it is not anymore (problem with dump function)
def _async_select(self, devs=[]):
# This is called during init of async mode.
self._async_detect_setup(reset=True)
for dev, kwarg in devs:
if dev in [self.fetch, self.readval]:
disp = kwarg.get('disp', None)
self._async_detect_setup(disp=disp)
def _async_detect_setup(self, disp=None, reset=False):
if reset:
            # make sure the default async_mode is 'wait' so that if
# _async_tocheck == 0, we just turn on wait.
# This could happen when using run_and_wait before anything is set
# Otherwise, getasync and readval both call async_select to setup
            # the mode properly (_async_mode and _async_tocheck).
self._async_tocheck = 0
self._async_mode = 'wait'
return
self._async_mode = 'srq'
disp_org = self.current_display.getcache()
if disp==None:
disp = disp_org
self.current_display.set(disp)
# 0x2=A-linear avg, 0x4=A-settled, 0x200=B-linear, 0x400=B-settled
if self.average_en.get(disp=disp):
if self.average_type.get() in ['linear', 'FixedLength']:
tocheck = 0x2
else:
self._async_mode = 'wait+srq'
tocheck = 0x4
else:
tocheck = 0x4
if disp == 'B':
tocheck <<= 8
self._async_tocheck |= tocheck
self.current_display.set(disp_org)
def _async_trigger_helper(self):
        # We are set up so that run_and_wait reuses the last config, which starts
# with a simple wait (could be invalid now if averaging is changed on the instrument).
# Should not be a big deal since that is not a normal use of it.
self._cum_display_status = 0
self.write('PAUS') # make sure we are not scanning anymore.
self.get_display_status() # reset the display status flags
self.write('DSPE %i'%self._async_tocheck)
self.write('STRT')
def _get_esr(self):
# This disables the get_esr in the async routines.
return 0
@locked_calling
def start(self):
"""
Same as pressing Start/Reset button.
"""
self._async_trigger_helper()
def _async_detect(self, max_time=.5): # 0.5 s max by default
ret = super(sr780_analyzer, self)._async_detect(max_time)
if self._async_mode == 'wait':
# pure wait
return ret
if not ret:
# Did not receive SRQ or wait long enough
return False
# Received SRQ, check if we are done
disp_st = self.get_display_status()
self._cum_display_status |= disp_st
tocheck = self._async_tocheck
#print 'tocheck %0x %0x %0x'%(tocheck, self._cum_display_status, disp_st)
if self._cum_display_status&tocheck == tocheck:
self.write('DSPE 0')
self._cum_display_status = 0
return True # We are done!
return False
def _fetch_getformat(self, **kwarg):
xaxis = kwarg.get('xaxis', True)
if xaxis:
multi = ('freq', 'data')
else:
multi = True
fmt = self.fetch._format
fmt.update(multi=multi, graph=[], xaxis=xaxis)
return BaseDevice.getformat(self.fetch, **kwarg)
def _fetch_getdev(self, disp=None, xaxis=True):
"""
        Optional parameters: disp and xaxis
        -disp: To select which display to read.
        -xaxis: when True (default), the first column of data is the xaxis
For faster transfer, make the view and unit the same type (both linear or both log)
It is STRONGLY recommended to use linear averaging.
For exponential averaging you need to specify a wait time with async_wait
i.e. set(srnet.async_wait,3) # for 3 seconds
"""
# The instrument has 5 Traces that can be used for memory.
        # There is REFY? d,j to obtain point j (0..length-1) in ref curve of display d
        # DSPN? d to obtain length of data set
if disp != None:
self.current_display.set(disp)
disp = self.current_display.getcache()
disp = self.current_display._tostr(disp)
# DSPY returns ascii but is slower than DSPB (binary)
# TODO implement handling of nyquist and nichols plot which return 2 values per datapoint.
# TODO handle waterfalls: dswb
data = self.ask('DSPB? %s'%disp, raw=True)
ret = np.fromstring(data, np.float32)
if xaxis:
            ret = np.asarray([self.get_xscale(), ret])
return ret
def _current_config(self, dev_obj=None, options={}):
if options.has_key('disp'):
self.current_display.set(options['disp'])
want_ch = 1
meas = self.meas.getcache()
# This does not handle Coherence, CrossSpectrum F2/F1 ...
if meas[-1] == '2' and meas[-4:-1] != 'ser':
want_ch = 2
orig_ch = self.current_channel.getcache()
if want_ch != orig_ch:
self.current_channel.set(want_ch)
conf = self._conf_helper('current_display', 'current_channel',
'input_source', 'input_mode', 'input_grounding', 'input_coupling',
'input_range_dBV', 'input_autorange_en', 'input_autorange_mode', 'input_antialiasing_en',
'input_aweight_en', 'input_auto_offset_en', 'input_eng_unit_en', 'input_eng_label',
'input_eng_unit_scale', 'input_eng_unit_user',
'freq_start', 'freq_stop', 'freq_resolution', 'freq_baseline', 'window_type',
'meas_group', 'meas', 'meas_view',
'meas_unit', 'dBm_ref', 'disp_PSD_en', 'disp_transducer_unit_mode',
'disp_live_en',
'average_en', 'average_mode', 'average_type', 'average_count_requested',
'average_increment_pct', 'average_overload_reject_en', 'average_preview_type',
'source_en', 'source_type', 'source_freq1', 'source_ampl1_V',
'source_offset_V', 'source_freq2', 'source_ampl2_V', 'async_wait',
options)
if want_ch != orig_ch:
self.current_channel.set(orig_ch)
return conf
def _create_devs(self):
display_sel = ChoiceIndex(['A', 'B']) # also both=2
self.current_display = MemoryDevice('A', choices=display_sel)
self.current_channel = MemoryDevice(1, choices=[1, 2])
self.freq_baseline = scpiDevice('FBAS 2,{val}', 'FBAS? 0', choices=ChoiceIndex([100e3, 102.4e3]))
self.dBm_ref = scpiDevice('DBMR 2,{val}', 'DBMR? 2', str_type=float, min=0)
self.source_en = scpiDevice('SRCO', str_type=bool)
self.source_type = scpiDevice('STYP', choices=ChoiceIndex(['Sine', 'Chirp', 'Noise', 'Arbitrary']))
self.source_freq1 = scpiDevice('S1FR', str_type=float)
self.source_ampl1_V = scpiDevice('S1AM', str_type=float)
self.source_offset_V = scpiDevice('SOFF', str_type=float)
self.source_freq2 = scpiDevice('S2FR', str_type=float)
self.source_ampl2_V = scpiDevice('S2AM', str_type=float)
self.input_source = scpiDevice('ISRC', choices=ChoiceIndex(['Analog', 'Capture']))
def devChOption(*arg, **kwarg):
options = kwarg.pop('options', {}).copy()
options.update(ch=self.current_channel)
app = kwarg.pop('options_apply', ['ch'])
kwarg.update(options=options, options_apply=app)
return scpiDevice(*arg, **kwarg)
self.input_mode = devChOption('I{ch}MD', choices=ChoiceIndex(['Analog', 'Capture']))
self.input_grounding = devChOption('I{ch}GD', choices=ChoiceIndex(['Float', 'Ground']))
self.input_coupling = devChOption('I{ch}GD', choices=ChoiceIndex(['DC', 'AC', 'ICP']))
self.input_range_dBV = devChOption('I{ch}RG', str_type=int, choices=range(-50, 36, 2))
self.input_autorange_en = devChOption('A{ch}RG', str_type=bool)
self.input_autorange_mode = devChOption('I{ch}AR', choices=ChoiceIndex(['Normal', 'Tracking']))
self.input_antialiasing_en = devChOption('I{ch}AF', str_type=bool)
self.input_aweight_en = devChOption('I{ch}AW', str_type=bool)
self.input_auto_offset_en = scpiDevice('IAOM', str_type=bool)
self.input_eng_unit_en = devChOption('EU{ch}M', str_type=bool)
self.input_eng_label = devChOption('EU{ch}L', str_type=ChoiceIndex(['m/s2', 'm/s', 'm', 'in/s2', 'in/s', 'in', 'mil', 'g', 'kg', 'lbs', 'N', 'dyne', 'Pas', 'bar', 'USER']))
self.input_eng_unit_scale = devChOption('EU{ch}V', str_type=float, doc='number of eng.unit/Volt')
self.input_eng_unit_user = devChOption('EU{ch}U', str_type=str)
def devDispOption(*arg, **kwarg):
options = kwarg.pop('options', {}).copy()
options.update(disp=self.current_display)
app = kwarg.pop('options_apply', ['disp'])
kwarg.update(options=options, options_apply=app)
return scpiDevice(*arg, **kwarg)
self.freq_span = devDispOption('FSPN {disp},{val}', 'FSPN? {disp}', str_type=float, setget=True)
self.freq_start = devDispOption('FSTR {disp},{val}', 'FSTR? {disp}', str_type=float, setget=True, min=0, max=102.4e3)
self.freq_stop = devDispOption('FEND {disp},{val}', 'FEND? {disp}', str_type=float, setget=True, min=0, max=102.4e3)
self.freq_center = devDispOption('FCTR {disp},{val}', 'FCTR? {disp}', str_type=float, setget=True, min=0, max=102.4e3)
resol_sel = ChoiceIndex([100, 200, 400, 800])
self.freq_resolution = devDispOption('FLIN {disp},{val}', 'FLIN? {disp}', choices=resol_sel)
mgrp_sel = ChoiceIndex(['FFT', 'Octave', 'Swept Sine'])
self.meas_group = devDispOption('MGRP {disp},{val}', 'MGRP? {disp}', choices=mgrp_sel)
meas_sel = ChoiceIndex(['FFT1', 'FFT2', 'Time1', 'Time2', 'WindowedTime1', 'WindowedTime2',
'Orbit', 'Coherence', 'CrossSpectrum', '<F2/F1>', '<F2>/<F1>',
'AutoCorr1', 'AutoCorr2', 'CaptureBuffer1', 'CaptureBuffer2',
'FFTuser1', 'FFTuser2', 'FFTuser3', 'FFTuser4', 'FFTuser5',
'Octave1', 'Octave2', 'OctaveCapBuff1', 'OctaveCapBuff2',
'OctaveUser1', 'OctaveUser2', 'OctaveUser3', 'OctaveUser4', 'OctaveUser5',
'SweptSpectrum1', 'SweptSpectrum2', 'SweptCross', 'SweptTransferFunction',
'SweptUser1', 'SweptUser2', 'SweptUser3', 'SweptUser4', 'SweptUser5'])
self.meas = devDispOption('MEAS {disp},{val}', 'MEAS? {disp}', choices=meas_sel)
view_sel = ChoiceIndex(['LogMag', 'LinMag', 'MagSquared', 'Real', 'Imag', 'Phase', 'UnWrapPhase', 'Nyquist', 'Nichols'])
self.meas_view = devDispOption('VIEW {disp},{val}', 'VIEW? {disp}', choices=view_sel)
unit_sel = ChoiceIndex(['Vpk', 'Vrms', 'Vpk2', 'Vrms2', 'dBVpk', 'dBVrms', 'dBm', 'dBspl', 'deg', 'rad', 'Units', 'dB'])
self.meas_unit = devDispOption('UNIT {disp},{val}', 'UNIT? {disp}', choices=unit_sel)
self.disp_live_en = devDispOption('DISP {disp},{val}', 'DISP? {disp}', str_type=bool)
self.disp_log_xscale = devDispOption('XAXS {disp},{val}', 'XAXS? {disp}', str_type=bool)
self.disp_PSD_en = devDispOption('PSDU {disp},{val}', 'PSDU? {disp}', str_type=bool, doc='Whether PSD (power spectral density) is enabled.')
self.disp_transducer_unit_mode = devDispOption('TDRC {disp},{val}', 'TDRC? {disp}', choices=ChoiceIndex(['acceleration', 'velocity', 'displacement']))
self.average_en = devDispOption('FAVG {disp},{val}', 'FAVG? {disp}', str_type=bool)
self.average_mode = devDispOption('FAVM {disp},{val}', 'FAVM? {disp}', choices=ChoiceIndex(['vector', 'RMS', 'PeakHold']))
self.average_type = devDispOption('FAVT {disp},{val}', 'FAVT? {disp}', choices=ChoiceIndex(['linear', 'exponential', 'FixedLength', 'continuous']))
self.average_count_requested = devDispOption('FAVN {disp},{val}', 'FAVN? {disp}', str_type=int, min=2, max=32767)
self.average_count = devDispOption(getstr='NAVG? {disp}', str_type=int)
self.average_increment_pct = devDispOption('FOVL {disp},{val}', 'FOVL? {disp}', str_type=float, min=0, max=300)
self.average_overload_reject_en = scpiDevice('FREJ 2,{val}', 'FREJ? 0', str_type=bool)
self.average_preview_type = devDispOption('PAVO {disp},{val}', 'PAVO? {disp}', choices=ChoiceIndex(['off', 'manual', 'timed']))
self.window_type = devDispOption('FWIN {disp},{val}', 'FWIN? {disp}', choices=ChoiceIndex(['uniform', 'hanning', 'flattop', 'BMH', 'kaiser', 'force', 'exponential', 'user', '-T/2..T/2', '0..T/2', '-T/4..T/4',]))
self._devwrap('fetch', autoinit=False, trig=True)
self.readval = ReadvalDev(self.fetch)
self._devwrap('dump', autoinit=False)
# This needs to be last to complete creation
super(type(self),self)._create_devs()
@locked_calling
def get_xscale(self):
# only works for fft
start = self.freq_start.getcache()
stop = self.freq_stop.getcache()
npoints = self.freq_resolution.getcache() + 1 # could also use DSPN? d
return np.linspace(start, stop, npoints)
def _dump_getformat(self, ps=True, **kwarg):
fmt = self.dump._format
if ps:
binfmt = '.ps'
else:
binfmt = '.gif'
fmt.update(bin=binfmt)
return BaseDevice.getformat(self.dump, **kwarg)
def _dump_getdev(self, ps=True, area='all'):
"""
options are ps, area
-ps: when True (default) returns a postscript object, otherwise returns a GIF file
-area: used for GIF files, one of 'graph', 'menu', 'status' or 'all'(default)
"""
# Reading data is tricky because the instrument does not send
# EOI on its last byte so we either need to detect the ending comment
# of the postscript or wait for the first timeout to occur for
# the bitmap.
# Also when the remote output is set to GPIB we do not receive the last byte.
# So we need to switch the output to RS232 first.
area_sel = dict(graph=0, menu=1, status=2, all=3)
# POUT sets hardware print key to bitmap or vector
# PDST 3 selects GPIB
# PCIC 0 selects host controller
# PLTP selects postscript
# PRTP selects GIF
r=''
old_to = self.set_timeout
self.set_timeout=.5 # useful for bitmap mode since we need to wait for timeout
self.write('OUTX 1') # Go to RS232 interface
if ps:
self.write('POUT 1;PDST 3;PCIC 0;PLTP 1;PLOT')
while r[-11:] != '%%Trailer\r\n':
r += self.visa.read_raw_n(1)
else:
self.write('POUT 0;PDST 3;PCIC 0;PRTP 4;PSCR %d;PRNT'%area_sel[area])
try:
while True:
r += self.visa.read_raw_n(1)
except visa_wrap.VisaIOError:
pass
self.write('OUTX 0') # return to gpib interface
self.set_timeout = old_to
return r
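# Hedged usage sketch (illustration only, not part of the driver): the dump
# device defined above returns the raw hardcopy data, so saving a screen shot
# is just a matter of writing that data to disk. The helper name, the `sr`
# instrument variable and the keyword passing are assumptions.
def save_hardcopy(sr, filename, ps=True, area='all'):
    """Fetch a hardcopy from the analyzer and write it to filename."""
    data = sr.dump.get(ps=ps, area=area)
    mode = 'w' if isinstance(data, str) else 'wb'
    with open(filename, mode) as f:
        f.write(data)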
# serial poll status word: 0=INSTrument, 1=DISPlay, 2=INPuT, 3=ERRor, 4=output buffer empty
# 5=standard status word, 6=SRQ, 7=IFC (no command execution in progress)
def get_instrument_status(self):
"""
returns a word of bit flags
bit 0 (1): A measurement has been triggered
bit 1 (2): Disk operation complete
bit 2 (4): Hardcopy output complete
bit 3 (8): unused
bit 4 (16): Capture buffer filled
bit 5 (32): Measurement has been paused
bit 6 (64): Measurement has been started
bit 7 (128): Single shot capture playback has finished
bit 8 (256): Measurement stopped to wait for average preview
bit 9-15: unused
"""
# can access bits with inst? 1
# can enable in status register with INSE
return int(self.ask('INST?'))
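# Hedged illustration (not part of the driver): the bit layout documented in
# the docstring above can be unpacked into named booleans with plain masking.
# The helper name and the flag names below are assumptions for readability.
def decode_instrument_status(status):
    """Turn the INST? word into a dict of named boolean flags."""
    names = ['triggered', 'disk_done', 'hardcopy_done', 'unused',
             'capture_filled', 'paused', 'started', 'playback_done',
             'waiting_avg_preview']
    return {name: bool(status & (1 << bit)) for bit, name in enumerate(names)}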
def get_display_status(self):
"""
returns a word of bit flags
bit 0 (1): displayA has new data
bit 1 (2): displayA linear average complete
bit 2 (4): displayA new settled data available
bit 3 (8): displayA failed a limit test
bit 4 (16): displayA swept sine has failed
bit 5 (32): displayA 1-shot waterfall has finished
bit 6-7: unused
bit 8 (256): displayB has new data
bit 9 (512): displayB linear average complete
bit 10 (1024):displayB new settled data available
bit 11 (2048):displayB failed a limit test
bit 12 (4096):displayB swept sine has failed
bit 13 (8192):displayB 1-shot waterfall has finished
bit 14-15: unused
Except for waterfall, always test for new data (bit 0/8) for
the correct display first.
"""
# can access bits with inst? 1
# can enable in status register with DSPE
return int(self.ask('DSPS?'))
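# Hedged sketch (illustration only): as noted above, test the "new data" bit
# (bit 0 for displayA, bit 8 for displayB) before reading a display. A simple
# poll loop could look like the following; the helper name, the `instr`
# argument and the timeout handling are assumptions.
def wait_for_new_data(instr, display=0, timeout=10., poll=0.1):
    """Return True once display 0 (A) or 1 (B) reports new data."""
    import time
    bit = 1 << (8 * display)
    t_end = time.time() + timeout
    while time.time() < t_end:
        if instr.get_display_status() & bit:
            return True
        time.sleep(poll)
    return False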
def get_input_status(self):
"""
returns a word of bit flags
bit 0 (1): input1 has fallen below half of full scale
bit 1 (2): input1 has exceeded half of full scale
bit 2 (4): input1 has exceeded full scale
bit 3 (8): input1 has exceeded 35 dBV, range switched to 34 dBV
bit 4 (16): input1 has autoranged
bit 5-7: unused
bit 8 (256): input2 has fallen below half of full scale
bit 9 (512): input2 has exceeded half of full scale
bit 10 (1024):input2 has exceeded full scale
bit 11 (2048):input2 has exceeded 35 dBV, range switched to 34 dBV
bit 12 (4096):input2 has autoranged
bit 13-15: unused
"""
# can access bits with inst? 1
# can enable in status register with INPE
# also see INPC? 0 (ch1) or INPC? 1 (ch2)
# which returns an instantaneous value 0-3 where:
# 0=input under half full scale
# 1=input over half full scale
# 2=input overloaded
# 3=input is HighV
return int(self.ask('INPS?'))
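# Hedged illustration (not part of the driver): per the docstring above, bits
# 2 and 10 of the INPS? word flag a full-scale overload on input1 and input2
# respectively, so an overload check is a two-mask test. The helper name is an
# assumption.
def any_input_overloaded(status):
    """Return True if either input channel reports a full-scale overload."""
    return bool(status & (1 << 2)) or bool(status & (1 << 10))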
@locked_calling
def get_error(self):
"""
returns two words of bit flags
first:
bit 0-1: unused
bit 2 (4): Too many responses are pending
bit 3 (8): too many commands received
bit 4 (16): command cannot execute successfully
bit 5 (32): command syntax error
bit 6 (64): key press or knob rotated
bit 7 (128): power is turned on
bit 8-15: unused
second:
bit 0 (1): An output error has occurred (print, plot, dump)
bit 1 (2): disk error
bit 2 (4): math error
bit 3 (8): RAM memory test fails
bit 4 (16): ROM memory test fails
bit 5 (32): Video memory test fails
bit 6 (64): Help memory test fails
bit 7 (128): DSP data memory fails
bit 8 (256): DSP program memory fails
bit 9 (512): DSP DRAM memory fails
bit 10 (1024): DSP calibration memory fails
bit 11 (2048): Ch1 calibration memory fails
bit 12 (4096): Ch2 calibration memory fails
bit 13-15: unused
"""
# can access bits with errs? 1
# can enable in status register with ERRE
# enable *ese with *ese
return int(self.ask('*esr?')),int(self.ask('ERRS?'))
#######################################################
## Lakeshore 325 Temperature controller
#######################################################
#@register_instrument('LSCI', 'MODEL325', '1.7/1.1')
@register_instrument('LSCI', 'MODEL325')
class lakeshore_325(visaInstrument):
"""
Temperature controller
Useful devices:
sa
sb
ta
tb
status_a
status_b
fetch
s? and t? return the sensor or kelvin value of a certain channel
status_? returns the status of the channel
fetch allows reading all channels
"""
def _fetch_helper(self, ch=None):
if ch is None:
ch = self.enabled_list.getcache()
if not isinstance(ch, (list, ChoiceBase)):
ch = [ch]
return ch
def _fetch_getformat(self, **kwarg):
ch = kwarg.get('ch', None)
ch = self._fetch_helper(ch)
multi = []
graph = []
for i, c in enumerate(ch):
graph.append(2*i)
multi.extend([c+'_T', c+'_S'])
fmt = self.fetch._format
fmt.update(multi=multi, graph=graph)
return BaseDevice.getformat(self.fetch, **kwarg)
def _fetch_getdev(self, ch=None):
ch = self._fetch_helper(ch)
ret = []
for c in ch:
if c == 'A':
ret.append(self.ta.get())
ret.append(self.sa.get())
elif c == 'B':
ret.append(self.tb.get())
ret.append(self.sb.get())
else:
raise ValueError("Invalid selection for ch. If it is None, check that enabled_list is a list with 'A' and/or 'B'")
return ret
def _current_config(self, dev_obj=None, options={}):
return self._conf_helper('sp', options)
def _create_devs(self):
self.crdg = scpiDevice(getstr='CRDG? A', str_type=float)
self.enabled_list = MemoryDevice(['A', 'B'])
self.thermocouple = scpiDevice(getstr='TEMP?', str_type=float)
self.ta = scpiDevice(getstr='KRDG? A', str_type=float) #in Kelvin
self.tb = scpiDevice(getstr='KRDG? B', str_type=float) #in Kelvin
self.sa = scpiDevice(getstr='SRDG? A', str_type=float) #in sensor unit: Ohm, V or mV
self.sb = scpiDevice(getstr='SRDG? B', str_type=float) #in sensor unit
self.status_a = scpiDevice(getstr='RDGST? A', str_type=int) #flags 1(0)=invalid, 16(4)=temp underrange,
#32(5)=temp overrange, 64(6)=sensor under (<0), 128(7)=sensor overrange
# 000 = valid
self.status_b = scpiDevice(getstr='RDGST? b', str_type=int)
self.htr = scpiDevice(getstr='HTR?', str_type=float) #heater out in %
self.sp = scpiDevice(setstr='SETP 1,', getstr='SETP? 1', str_type=float)
self._devwrap('fetch', autoinit=False)
self.alias = self.fetch
# This needs to be last to complete creation
super(type(self),self)._create_devs()
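# Hedged usage sketch (illustration only): with this style of driver the
# controller is typically instantiated with its VISA address, and the devices
# declared in _create_devs() are then read with .get(). The address string and
# names below are placeholders, not values taken from this file.
def _example_read_lakeshore_325(address='GPIB0::12::INSTR'):
    tc = lakeshore_325(address)
    t_a = tc.ta.get()                   # channel A temperature, in kelvin
    s_a = tc.sa.get()                   # channel A raw sensor reading
    both = tc.fetch.get(ch=['A', 'B'])  # [T_A, S_A, T_B, S_B]
    return t_a, s_a, both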
#######################################################
## Lakeshore 340 Temperature controller
#######################################################
register_idn_alias('Lake Shore Cryotronics', 'LSCI')
#@register_instrument('LSCI', 'MODEL340', '061407')
@register_instrument('LSCI', 'MODEL340')
class lakeshore_340(visaInstrument):
"""
Temperature controller used for He3 system
Useful devices:
s
t
fetch
status_ch
current_ch
s and t return the sensor or kelvin value of a certain channel
which defaults to current_ch
status_ch returns the status of ch
fetch allows reading all channels
"""
def _current_config(self, dev_obj=None, options={}):
| if dev_obj == self.fetch: | 4,119 | lcc_e | python | null | 8315f5f7696d567019104d0bca173bb58486f52c04ab7202 |
|
"""
Copyright (C) 2011-2015 Parametric Products Intellectual Holdings, LLC
This file is part of CadQuery.
CadQuery is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
CadQuery is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; If not, see <http://www.gnu.org/licenses/>
"""
import time
import math
from cadquery import *
from cadquery import selectors
from cadquery import exporters
from copy import copy, deepcopy
class CQContext(object):
"""
A shared context for modeling.
All objects in the same CQ chain share a reference to this same object instance
which allows for shared state when needed.
"""
def __init__(self):
self.pendingWires = [] # a list of wires that have been created and need to be extruded
self.pendingEdges = [] # a list of created pending edges that need to be joined into wires
# a reference to the first point for a set of edges.
# Used to determine how to behave when close() is called
self.firstPoint = None
self.tolerance = 0.0001 # user specified tolerance
class CQ(object):
"""
Provides enhanced functionality for a wrapped CAD primitive.
Examples include feature selection, feature creation, 2d drawing
using work planes, and 3d operations like fillets, shells, and splitting
"""
def __init__(self, obj):
"""
Construct a new CadQuery (CQ) object that wraps a CAD primitive.
:param obj: Object to Wrap.
:type obj: A CAD Primitive ( wire,vertex,face,solid,edge )
"""
self.objects = []
self.ctx = CQContext()
self.parent = None
if obj: # guarded because sometimes None for internal use
self.objects.append(obj)
def newObject(self, objlist):
"""
Make a new CQ object.
:param objlist: The stack of objects to use
:type objlist: a list of CAD primitives ( wire,face,edge,solid,vertex,etc )
The parent of the new object will be set to the current object,
to preserve the chain correctly.
Custom plugins and subclasses should use this method to create new CQ objects
correctly.
"""
r = type(self)(None) # create a completely blank one
r.parent = self
r.ctx = self.ctx # context solid remains the same
r.objects = list(objlist)
return r
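# Hedged illustration (not part of CadQuery): the docstring above says plugins
# should build their results with newObject() so the parent chain and shared
# context are preserved. A minimal plugin method written against that rule
# could look like this; the method name and the commented-out registration
# line are assumptions, not part of the library.
def _translatedCopies(self, offsets):
    """Return a new CQ object holding translated copies of the first item."""
    base = self.val()
    return self.newObject([base.translate(Vector(*o)) for o in offsets])
# Workplane.translatedCopies = _translatedCopies  # hypothetical registration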
def _collectProperty(self, propName):
"""
Collects all of the values for propName,
for all items on the stack.
FreeCAD objects do not implement id correctly,
so hashCode is used to ensure we don't add the same
object multiple times.
One weird use case is that the stack could have a solid reference object
on it. This is meant to be a reference to the most recently modified version
of the context solid, whatever it is.
"""
all = {}
for o in self.objects:
# tricky-- if an object is a compound of solids,
# do not return all of the solids underneath-- typically
# then we'll keep joining to ourself
if propName == 'Solids' and isinstance(o, Solid) and o.ShapeType() == 'Compound':
for i in getattr(o, 'Compounds')():
all[i.hashCode()] = i
else:
if hasattr(o, propName):
for i in getattr(o, propName)():
all[i.hashCode()] = i
return list(all.values())
def split(self, keepTop=False, keepBottom=False):
"""
Splits a solid on the stack into two parts, optionally keeping the separate parts.
:param boolean keepTop: True to keep the top, False or None to discard it
:param boolean keepBottom: True to keep the bottom, False or None to discard it
:raises: ValueError if keepTop and keepBottom are both false.
:raises: ValueError if there is not a solid in the current stack or the parent chain
:returns: CQ object with the desired objects on the stack.
The most common operation splits a solid and keeps one half. This sample creates
split bushing::
#drill a hole in the side
c = Workplane().box(1,1,1).faces(">Z").workplane().circle(0.25).cutThruAll()
#now cut it in half sideways
c.faces(">Y").workplane(-0.5).split(keepTop=True)
"""
solid = self.findSolid()
if (not keepTop) and (not keepBottom):
raise ValueError("You have to keep at least one half")
maxDim = solid.BoundingBox().DiagonalLength * 10.0
topCutBox = self.rect(maxDim, maxDim)._extrude(maxDim)
bottomCutBox = self.rect(maxDim, maxDim)._extrude(-maxDim)
top = solid.cut(bottomCutBox)
bottom = solid.cut(topCutBox)
if keepTop and keepBottom:
# Put both on the stack, leave original unchanged.
return self.newObject([top, bottom])
else:
# Put the one we are keeping on the stack, and also update the
# context solid to the one we kept.
if keepTop:
solid.wrapped = top.wrapped
return self.newObject([top])
else:
solid.wrapped = bottom.wrapped
return self.newObject([bottom])
def combineSolids(self, otherCQToCombine=None):
"""
!!!DEPRECATED!!! use union()
Combines all solids on the current stack, and any context object, together
into a single object.
After the operation, the returned solid is also the context solid.
:param otherCQToCombine: another CadQuery to combine.
:return: a cQ object with the resulting combined solid on the stack.
Most of the time, both objects will contain a single solid, which is
combined and returned on the stack of the new object.
"""
#loop through current stack objects, and combine them
#TODO: combine other types of objects as well, like edges and wires
toCombine = self.solids().vals()
if otherCQToCombine:
for obj in otherCQToCombine.solids().vals():
toCombine.append(obj)
if len(toCombine) < 1:
raise ValueError("Cannot Combine: at least one solid required!")
#get context solid and we don't want to find our own objects
ctxSolid = self.findSolid(searchStack=False, searchParents=True)
if ctxSolid is None:
ctxSolid = toCombine.pop(0)
#now combine them all. make sure to save a reference to the ctxSolid pointer!
s = ctxSolid
for tc in toCombine:
s = s.fuse(tc)
ctxSolid.wrapped = s.wrapped
return self.newObject([s])
def all(self):
"""
Return a list of all CQ objects on the stack.
useful when you need to operate on the elements
individually.
Contrast with vals, which returns the underlying
objects for all of the items on the stack
"""
return [self.newObject([o]) for o in self.objects]
def size(self):
"""
Return the number of objects currently on the stack
"""
return len(self.objects)
def vals(self):
"""
get the values in the current list
:rtype: list of FreeCAD objects
:returns: the values of the objects on the stack.
Contrast with :py:meth:`all`, which returns CQ objects for all of the items on the stack
"""
return self.objects
def add(self, obj):
"""
Adds an object or a list of objects to the stack
:param obj: an object to add
:type obj: a CQ object, CAD primitive, or list of CAD primitives
:return: a CQ object with the requested operation performed
If a CQ object, the values of that object's stack are added. If a list of cad primitives,
they are all added. If a single CAD primitive it is added
Used in rare cases when you need to combine the results of several CQ results
into a single CQ object. Shelling is one common example
"""
if type(obj) == list:
self.objects.extend(obj)
elif isinstance(obj, CQ):
self.objects.extend(obj.objects)
else:
self.objects.append(obj)
return self
def val(self):
"""
Return the first value on the stack
:return: the first value on the stack.
:rtype: A FreeCAD object or a SolidReference
"""
return self.objects[0]
def toFreecad(self):
"""
Directly returns the wrapped FreeCAD object to cut down on the amount of boiler plate code
needed when rendering a model in FreeCAD's 3D view.
:return: The wrapped FreeCAD object
:rtype: A FreeCAD object or a SolidReference
"""
return self.objects[0].wrapped
def workplane(self, offset=0.0, invert=False, centerOption='CenterOfMass'):
"""
Creates a new 2-D workplane, located relative to the first face on the stack.
:param offset: offset for the work plane in the Z direction. Defaults to 0.0.
:param invert: invert the Z direction from that of the face.
:type offset: float or None=0.0
:type invert: boolean or None=False
:rtype: Workplane object ( which is a subclass of CQ )
The first element on the stack must be a face, a set of
co-planar faces or a vertex. If a vertex, then the parent
item on the chain immediately before the vertex must be a
face.
The result will be a 2-d working plane
with a new coordinate system set up as follows:
* The origin will be located in the *center* of the
face/faces, if a face/faces was selected. If a vertex was
selected, the origin will be at the vertex, and located
on the face.
* The Z direction will be normal to the plane of the face, computed
at the center point.
* The X direction will be parallel to the x-y plane. If the workplane is parallel to
the global x-y plane, the x direction of the workplane will coincide with the
global x direction.
Most commonly, the selected face will be planar, and the workplane lies in the same plane
of the face ( IE, offset=0). Occasionally, it is useful to define a face offset from
an existing surface, and even more rarely to define a workplane based on a face that is
not planar.
To create a workplane without first having a face, use the Workplane() method.
Future Enhancements:
* Allow creating workplane from planar wires
* Allow creating workplane based on an arbitrary point on a face, not just the center.
For now you can work around by creating a workplane and then offsetting the center
afterwards.
"""
def _isCoPlanar(f0, f1):
"""Test if two faces are on the same plane."""
p0 = f0.Center()
p1 = f1.Center()
n0 = f0.normalAt()
n1 = f1.normalAt()
# test normals (direction of planes)
if not ((abs(n0.x-n1.x) < self.ctx.tolerance) or
(abs(n0.y-n1.y) < self.ctx.tolerance) or
(abs(n0.z-n1.z) < self.ctx.tolerance)):
return False
# test if p1 is on the plane of f0 (offset of planes)
return abs(n0.dot(p0.sub(p1))) < self.ctx.tolerance
def _computeXdir(normal):
"""
Figures out the X direction based on the given normal.
:param normal: The direction that's normal to the plane.
:type normal: A Vector
:return: A vector representing the X direction.
"""
xd = Vector(0, 0, 1).cross(normal)
if xd.Length < self.ctx.tolerance:
#this face is parallel with the x-y plane, so choose x to be in global coordinates
xd = Vector(1, 0, 0)
return xd
if len(self.objects) > 1:
# are all objects 'PLANE'?
if not all(o.geomType() == 'PLANE' for o in self.objects):
raise ValueError("If multiple objects selected, they all must be planar faces.")
# are all faces co-planar with each other?
if not all(_isCoPlanar(self.objects[0], f) for f in self.objects[1:]):
raise ValueError("Selected faces must be co-planar.")
if centerOption == 'CenterOfMass':
center = Shape.CombinedCenter(self.objects)
elif centerOption == 'CenterOfBoundBox':
center = Shape.CombinedCenterOfBoundBox(self.objects)
normal = self.objects[0].normalAt()
xDir = _computeXdir(normal)
else:
obj = self.objects[0]
if isinstance(obj, Face):
if centerOption == 'CenterOfMass':
center = obj.Center()
elif centerOption == 'CenterOfBoundBox':
center = obj.CenterOfBoundBox()
normal = obj.normalAt(center)
xDir = _computeXdir(normal)
else:
if hasattr(obj, 'Center'):
if centerOption == 'CenterOfMass':
center = obj.Center()
elif centerOption == 'CenterOfBoundBox':
center = obj.CenterOfBoundBox()
normal = self.plane.zDir
xDir = self.plane.xDir
else:
raise ValueError("Needs a face or a vertex or point on a work plane")
#invert if requested
if invert:
normal = normal.multiply(-1.0)
#offset origin if desired
offsetVector = normal.normalized().multiply(offset)
offsetCenter = center.add(offsetVector)
#make the new workplane
plane = Plane(offsetCenter, xDir, normal)
s = Workplane(plane)
s.parent = self
s.ctx = self.ctx
#a new workplane has the center of the workplane on the stack
return s
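# Hedged usage sketch (illustration only): following the rules above, selecting
# the top face of a box and calling workplane(offset=...) gives a 2-D plane
# centered on that face with +Z along its normal, offset along that normal.
# The helper name and dimensions are placeholders.
def _example_offset_workplane():
    box = Workplane("XY").box(2, 2, 1)
    wp = box.faces(">Z").workplane(offset=0.5)  # plane 0.5 above the top face
    return wp.circle(0.25).extrude(0.25)        # small boss grown from that plane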
def first(self):
"""
Return the first item on the stack
:returns: the first item on the stack.
:rtype: a CQ object
"""
return self.newObject(self.objects[0:1])
def item(self, i):
"""
Return the ith item on the stack.
:rtype: a CQ object
"""
return self.newObject([self.objects[i]])
def last(self):
"""
Return the last item on the stack.
:rtype: a CQ object
"""
return self.newObject([self.objects[-1]])
def end(self):
"""
Return the parent of this CQ element
:rtype: a CQ object
:raises: ValueError if there are no more parents in the chain.
For example::
CQ(obj).faces("+Z").vertices().end()
will return the same as::
CQ(obj).faces("+Z")
"""
if self.parent:
return self.parent
else:
raise ValueError("Cannot End the chain-- no parents!")
def findSolid(self, searchStack=True, searchParents=True):
"""
Finds the first solid object in the chain, searching from the current node
backwards through parents until one is found.
:param searchStack: should objects on the stack be searched first.
:param searchParents: should parents be searched?
:raises: ValueError if no solid is found in the current object or its parents,
and errorOnEmpty is True
This function is very important for chains that are modifying a single parent object,
most often a solid.
Most of the time, a chain defines or selects a solid, and then modifies it using workplanes
or other operations.
Plugin Developers should make use of this method to find the solid that should be modified,
if the plugin implements a unary operation, or if the operation will automatically merge its
results with an object already on the stack.
"""
#notfound = ValueError("Cannot find a Valid Solid to Operate on!")
if searchStack:
for s in self.objects:
if isinstance(s, Solid):
return s
elif isinstance(s, Compound):
return s.Solids()
if searchParents and self.parent is not None:
return self.parent.findSolid(searchStack=True, searchParents=searchParents)
return None
def _selectObjects(self, objType, selector=None):
"""
Filters objects of the selected type with the specified selector,and returns results
:param objType: the type of object we are searching for
:type objType: string: (Vertices|Edges|Wires|Solids|Shells|Compounds|CompSolids)
:return: a CQ object with the selected objects on the stack.
**Implementation Note**: This is the base implementation of the vertices,edges,faces,
solids,shells, and other similar selector methods. It is a useful extension point for
plugin developers to make other selector methods.
"""
# A single list of all faces from all objects on the stack
toReturn = self._collectProperty(objType)
if selector is not None:
# if isinstance(selector, str) or isinstance(selector, str):
try:
selectorObj = selectors.StringSyntaxSelector(selector)
except:
selectorObj = selector
toReturn = selectorObj.filter(toReturn)
return self.newObject(toReturn)
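# Hedged illustration (not part of CadQuery): _selectObjects() is the extension
# point the docstring above describes, so a new selector method only needs to
# forward the topology name and the optional selector. The method name below is
# an assumption.
def _compSolids(self, selector=None):
    """Select CompSolid objects on the stack, in the same way faces() does."""
    return self._selectObjects('CompSolids', selector)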
def vertices(self, selector=None):
"""
Select the vertices of objects on the stack, optionally filtering the selection. If there
are multiple objects on the stack, the vertices of all objects are collected and a list of
all the distinct vertices is returned.
:param selector:
:type selector: None, a Selector object, or a string selector expression.
:return: a CQ object whose stack contains the *distinct* vertices of *all* objects on the
current stack, after being filtered by the selector, if provided
If there are no vertices for any objects on the current stack, an empty CQ object
is returned
The typical use is to select the vertices of a single object on the stack. For example::
Workplane().box(1,1,1).faces("+Z").vertices().size()
returns 4, because the topmost face of a cube will contain four vertices. While this::
Workplane().box(1,1,1).faces().vertices().size()
returns 8, because a cube has a total of 8 vertices
**Note** Circles are peculiar, they have a single vertex at the center!
:py:class:`StringSyntaxSelector`
"""
return self._selectObjects('Vertices', selector)
def faces(self, selector=None):
"""
Select the faces of objects on the stack, optionally filtering the selection. If there are
multiple objects on the stack, the faces of all objects are collected and a list of all the
distinct faces is returned.
:param selector: A selector
:type selector: None, a Selector object, or a string selector expression.
:return: a CQ object whose stack contains all of the *distinct* faces of *all* objects on
the current stack, filtered by the provided selector.
If there are no vertices for any objects on the current stack, an empty CQ object
is returned.
The typical use is to select the faces of a single object on the stack. For example::
CQ(aCube).faces("+Z").size()
returns 1, because a cube has one face with a normal in the +Z direction. Similarly::
CQ(aCube).faces().size()
returns 6, because a cube has a total of 6 faces, And::
CQ(aCube).faces("|Z").size()
returns 2, because a cube has 2 faces having normals parallel to the z direction
See more about selectors HERE
"""
return self._selectObjects('Faces', selector)
def edges(self, selector=None):
"""
Select the edges of objects on the stack, optionally filtering the selection. If there are
multiple objects on the stack, the edges of all objects are collected and a list of all the
distinct edges is returned.
:param selector: A selector
:type selector: None, a Selector object, or a string selector expression.
:return: a CQ object whose stack contains all of the *distinct* edges of *all* objects on
the current stack, filtered by the provided selector.
If there are no edges for any objects on the current stack, an empty CQ object is returned
The typical use is to select the edges of a single object on the stack. For example::
CQ(aCube).faces("+Z").edges().size()
returns 4, because the topmost face of a cube has four edges. Similarly::
CQ(aCube).edges().size()
returns 12, because a cube has a total of 12 edges, And::
CQ(aCube).edges("|Z").size()
returns 4, because a cube has 4 edges parallel to the z direction
See more about selectors HERE
"""
return self._selectObjects('Edges', selector)
def wires(self, selector=None):
"""
Select the wires of objects on the stack, optionally filtering the selection. If there are
multiple objects on the stack, the wires of all objects are collected and a list of all the
distinct wires is returned.
:param selector: A selector
:type selector: None, a Selector object, or a string selector expression.
:return: a CQ object whose stack contains all of the *distinct* wires of *all* objects on
the current stack, filtered by the provided selector.
If there are no wires for any objects on the current stack, an empty CQ object is returned
The typical use is to select the wires of a single object on the stack. For example::
CQ(aCube).faces("+Z").wires().size()
returns 1, because a face typically only has one outer wire
See more about selectors HERE
"""
return self._selectObjects('Wires', selector)
def solids(self, selector=None):
"""
Select the solids of objects on the stack, optionally filtering the selection. If there are
multiple objects on the stack, the solids of all objects are collected and a list of all the
distinct solids is returned.
:param selector: A selector
:type selector: None, a Selector object, or a string selector expression.
:return: a CQ object whose stack contains all of the *distinct* solids of *all* objects on
the current stack, filtered by the provided selector.
If there are no solids for any objects on the current stack, an empty CQ object is returned
The typical use is to select a single object on the stack. For example::
CQ(aCube).solids().size()
returns 1, because a cube consists of one solid.
It is possible for single CQ object ( or even a single CAD primitive ) to contain
multiple solids.
See more about selectors HERE
"""
return self._selectObjects('Solids', selector)
def shells(self, selector=None):
"""
Select the shells of objects on the stack, optionally filtering the selection. If there are
multiple objects on the stack, the shells of all objects are collected and a list of all the
distinct shells is returned.
:param selector: A selector
:type selector: None, a Selector object, or a string selector expression.
:return: a CQ object whose stack contains all of the *distinct* shells of *all* objects on
the current stack, filtered by the provided selector.
If there are no shells for any objects on the current stack, an empty CQ object is returned
Most solids will have a single shell, which represents the outer surface. A shell will
typically be composed of multiple faces.
See more about selectors HERE
"""
return self._selectObjects('Shells', selector)
def compounds(self, selector=None):
"""
Select compounds on the stack, optionally filtering the selection. If there are multiple
objects on the stack, they are collected and a list of all the distinct compounds
is returned.
:param selector: A selector
:type selector: None, a Selector object, or a string selector expression.
:return: a CQ object whose stack contains all of the *distinct* compounds of *all* objects on
the current stack, filtered by the provided selector.
A compound contains multiple CAD primitives that resulted from a single operation, such as
a union, cut, split, or fillet. Compounds can contain multiple edges, wires, or solids.
See more about selectors HERE
"""
return self._selectObjects('Compounds', selector)
def toSvg(self, opts=None, view_vector=(-1.75,1.1,5)):
"""
Returns svg text that represents the first item on the stack.
for testing purposes.
:param opts: svg formatting options
:type opts: dictionary, width and height
:param view_vector: camera's view direction vector
:type view_vector: tuple, (x, y, z)
:return: a string that contains SVG that represents this item.
"""
return exporters.getSVG(self.val().wrapped, opts=opts, view_vector=view_vector)
def exportSvg(self, fileName, view_vector=(-1.75,1.1,5)):
"""
Exports the first item on the stack as an SVG file
For testing purposes mainly.
:param fileName: the filename to export
:param view_vector: camera's view direction vector
:type view_vector: tuple, (x, y, z)
:type fileName: String, absolute path to the file
"""
exporters.exportSVG(self, fileName, view_vector)
def rotateAboutCenter(self, axisEndPoint, angleDegrees):
"""
Rotates all items on the stack by the specified angle, about the specified axis
The center of rotation is a vector starting at the center of the object on the stack,
and ending at the specified point.
:param axisEndPoint: the second point of axis of rotation
:type axisEndPoint: a three-tuple in global coordinates
:param angleDegrees: the rotation angle, in degrees
:type angleDegrees: float
:returns: a CQ object, with all items rotated.
WARNING: This version returns the same cq object instead of a new one-- the
old object is not accessible.
Future Enhancements:
* A version of this method that returns a transformed copy, rather than modifying
the originals
* This method doesn't expose a very good interface, because the axis of rotation
could be inconsistent between multiple objects. This is because the beginning
of the axis is variable, while the end is fixed. This is fine when operating on
one object, but is not cool for multiple.
"""
#center point is the first point in the vector
endVec = Vector(axisEndPoint)
def _rot(obj):
startPt = obj.Center()
endPt = startPt + endVec
return obj.rotate(startPt, endPt, angleDegrees)
return self.each(_rot, False)
def rotate(self, axisStartPoint, axisEndPoint, angleDegrees):
"""
Returns a copy of all of the items on the stack rotated through an angle around the axis
of rotation.
:param axisStartPoint: The first point of the axis of rotation
:type axisStartPoint: a 3-tuple of floats
:param axisEndPoint: The second point of the axis of rotation
:type axisEndPoint: a 3-tuple of floats
:param angleDegrees: the rotation angle, in degrees
:type angleDegrees: float
:returns: a CQ object
"""
return self.newObject([o.rotate(axisStartPoint, axisEndPoint, angleDegrees)
for o in self.objects])
def mirror(self, mirrorPlane="XY", basePointVector=(0, 0, 0)):
"""
Mirror a single CQ object. This operation is the same as in the FreeCAD PartWB's mirroring
:param mirrorPlane: the plane to mirror about
:type mirrorPlane: string, one of "XY", "YX", "XZ", "ZX", "YZ", "ZY", naming the plane to mirror about
:param basePointVector: the base point to mirror about
:type basePointVector: tuple
"""
newS = self.newObject([self.objects[0].mirror(mirrorPlane, basePointVector)])
return newS.first()
def translate(self, vec):
"""
Returns a copy of all of the items on the stack moved by the specified translation vector.
:param vec: distance to move, in global coordinates
:type vec: a 3-tuple of float
:returns: a CQ object
"""
return self.newObject([o.translate(vec) for o in self.objects])
def shell(self, thickness):
"""
Remove the selected faces to create a shell of the specified thickness.
To shell, first create a solid, and *in the same chain* select the faces you wish to remove.
:param thickness: a float, representing the thickness of the desired shell.
Negative values shell inwards, positive values shell outwards.
:raises: ValueError if the current stack contains objects that are not faces of a solid
further up in the chain.
:returns: a CQ object with the resulting shelled solid selected.
This example will create a hollowed out unit cube, where the top most face is open,
and all other walls are 0.2 units thick::
Workplane().box(1,1,1).faces("+Z").shell(0.2)
Shelling is one of the cases where you may need to use the add method to select several
faces. For example, this example creates a 3-walled corner, by removing three faces
of a cube::
s = Workplane().box(1,1,1)
s1 = s.faces("+Z")
s1.add(s.faces("+Y")).add(s.faces("+X"))
self.saveModel(s1.shell(0.2))
This fairly yucky syntax for selecting multiple faces is planned for improvement
**Note**: When sharp edges are shelled inwards, they remain sharp corners, but **outward**
shells are automatically filleted, because an outward offset from a corner generates
a radius.
Future Enhancements:
Better selectors to make it easier to select multiple faces
"""
solidRef = self.findSolid()
for f in self.objects:
if type(f) != Face:
raise ValueError("Shelling requires that faces be selected")
s = solidRef.shell(self.objects, thickness)
solidRef.wrapped = s.wrapped
return self.newObject([s])
def fillet(self, radius):
"""
Fillets a solid on the selected edges.
The edges on the stack are filleted. The solid to which the edges belong must be in the
parent chain of the selected edges.
:param radius: the radius of the fillet, must be > zero
:type radius: positive float
:raises: ValueError if at least one edge is not selected
:raises: ValueError if the solid containing the edge is not in the chain
:returns: cq object with the resulting solid selected.
This example will create a unit cube, with the top edges filleted::
| s = Workplane().box(1,1,1).faces("+Z").edges().fillet(0.1) | 4,053 | lcc_e | python | null | edc5a05e949253312fe2af3c4fcdab85f5bf23272d2a6b86 |
|
# coding=utf-8
# Python Standard Library Imports
from time import sleep
from math import atan, atan2, sqrt
# External Imports
pass
# Custom Imports
from pycomms import PyComms, u_to_s, qv_mult
class MPU6050:
# Register map based on Jeff Rowberg <jeff@rowberg.net> source code at
# https://github.com/jrowberg/i2cdevlib/blob/master/Arduino/MPU6050
# /MPU6050.h
MPU6050_ADDRESS_AD0_LOW = 0x68 # address pin low (GND), default for
# InvenSense evaluation board
MPU6050_ADDRESS_AD0_HIGH = 0x69 # address pin high (VCC)
MPU6050_DEFAULT_ADDRESS = MPU6050_ADDRESS_AD0_LOW
MPU6050_RA_XG_OFFS_TC = 0x00 # [7] PWR_MODE, [6:1] XG_OFFS_TC,
# [0] OTP_BNK_VLD
MPU6050_RA_YG_OFFS_TC = 0x01 # [7] PWR_MODE, [6:1] YG_OFFS_TC,
# [0] OTP_BNK_VLD
MPU6050_RA_ZG_OFFS_TC = 0x02 # [7] PWR_MODE, [6:1] ZG_OFFS_TC,
# [0] OTP_BNK_VLD
MPU6050_RA_X_FINE_GAIN = 0x03 # [7:0] X_FINE_GAIN
MPU6050_RA_Y_FINE_GAIN = 0x04 # [7:0] Y_FINE_GAIN
MPU6050_RA_Z_FINE_GAIN = 0x05 # [7:0] Z_FINE_GAIN
MPU6050_RA_XA_OFFS_H = 0x06 # [15:0] XA_OFFS
MPU6050_RA_XA_OFFS_L_TC = 0x07
MPU6050_RA_YA_OFFS_H = 0x08 # [15:0] YA_OFFS
MPU6050_RA_YA_OFFS_L_TC = 0x09
MPU6050_RA_ZA_OFFS_H = 0x0A # [15:0] ZA_OFFS
MPU6050_RA_ZA_OFFS_L_TC = 0x0B
MPU6050_RA_XG_OFFS_USRH = 0x13 # [15:0] XG_OFFS_USR
MPU6050_RA_XG_OFFS_USRL = 0x14
MPU6050_RA_YG_OFFS_USRH = 0x15 # [15:0] YG_OFFS_USR
MPU6050_RA_YG_OFFS_USRL = 0x16
MPU6050_RA_ZG_OFFS_USRH = 0x17 # [15:0] ZG_OFFS_USR
MPU6050_RA_ZG_OFFS_USRL = 0x18
MPU6050_RA_SMPLRT_DIV = 0x19
MPU6050_RA_CONFIG = 0x1A
MPU6050_RA_GYRO_CONFIG = 0x1B
MPU6050_RA_ACCEL_CONFIG = 0x1C
MPU6050_RA_FF_THR = 0x1D
MPU6050_RA_FF_DUR = 0x1E
MPU6050_RA_MOT_THR = 0x1F
MPU6050_RA_MOT_DUR = 0x20
MPU6050_RA_ZRMOT_THR = 0x21
MPU6050_RA_ZRMOT_DUR = 0x22
MPU6050_RA_FIFO_EN = 0x23
MPU6050_RA_I2C_MST_CTRL = 0x24
MPU6050_RA_I2C_SLV0_ADDR = 0x25
MPU6050_RA_I2C_SLV0_REG = 0x26
MPU6050_RA_I2C_SLV0_CTRL = 0x27
MPU6050_RA_I2C_SLV1_ADDR = 0x28
MPU6050_RA_I2C_SLV1_REG = 0x29
MPU6050_RA_I2C_SLV1_CTRL = 0x2A
MPU6050_RA_I2C_SLV2_ADDR = 0x2B
MPU6050_RA_I2C_SLV2_REG = 0x2C
MPU6050_RA_I2C_SLV2_CTRL = 0x2D
MPU6050_RA_I2C_SLV3_ADDR = 0x2E
MPU6050_RA_I2C_SLV3_REG = 0x2F
MPU6050_RA_I2C_SLV3_CTRL = 0x30
MPU6050_RA_I2C_SLV4_ADDR = 0x31
MPU6050_RA_I2C_SLV4_REG = 0x32
MPU6050_RA_I2C_SLV4_DO = 0x33
MPU6050_RA_I2C_SLV4_CTRL = 0x34
MPU6050_RA_I2C_SLV4_DI = 0x35
MPU6050_RA_I2C_MST_STATUS = 0x36
MPU6050_RA_INT_PIN_CFG = 0x37
MPU6050_RA_INT_ENABLE = 0x38
MPU6050_RA_DMP_INT_STATUS = 0x39
MPU6050_RA_INT_STATUS = 0x3A
MPU6050_RA_ACCEL_XOUT_H = 0x3B
MPU6050_RA_ACCEL_XOUT_L = 0x3C
MPU6050_RA_ACCEL_YOUT_H = 0x3D
MPU6050_RA_ACCEL_YOUT_L = 0x3E
MPU6050_RA_ACCEL_ZOUT_H = 0x3F
MPU6050_RA_ACCEL_ZOUT_L = 0x40
MPU6050_RA_TEMP_OUT_H = 0x41
MPU6050_RA_TEMP_OUT_L = 0x42
MPU6050_RA_GYRO_XOUT_H = 0x43
MPU6050_RA_GYRO_XOUT_L = 0x44
MPU6050_RA_GYRO_YOUT_H = 0x45
MPU6050_RA_GYRO_YOUT_L = 0x46
MPU6050_RA_GYRO_ZOUT_H = 0x47
MPU6050_RA_GYRO_ZOUT_L = 0x48
MPU6050_RA_EXT_SENS_DATA_00 = 0x49
MPU6050_RA_EXT_SENS_DATA_01 = 0x4A
MPU6050_RA_EXT_SENS_DATA_02 = 0x4B
MPU6050_RA_EXT_SENS_DATA_03 = 0x4C
MPU6050_RA_EXT_SENS_DATA_04 = 0x4D
MPU6050_RA_EXT_SENS_DATA_05 = 0x4E
MPU6050_RA_EXT_SENS_DATA_06 = 0x4F
MPU6050_RA_EXT_SENS_DATA_07 = 0x50
MPU6050_RA_EXT_SENS_DATA_08 = 0x51
MPU6050_RA_EXT_SENS_DATA_09 = 0x52
MPU6050_RA_EXT_SENS_DATA_10 = 0x53
MPU6050_RA_EXT_SENS_DATA_11 = 0x54
MPU6050_RA_EXT_SENS_DATA_12 = 0x55
MPU6050_RA_EXT_SENS_DATA_13 = 0x56
MPU6050_RA_EXT_SENS_DATA_14 = 0x57
MPU6050_RA_EXT_SENS_DATA_15 = 0x58
MPU6050_RA_EXT_SENS_DATA_16 = 0x59
MPU6050_RA_EXT_SENS_DATA_17 = 0x5A
MPU6050_RA_EXT_SENS_DATA_18 = 0x5B
MPU6050_RA_EXT_SENS_DATA_19 = 0x5C
MPU6050_RA_EXT_SENS_DATA_20 = 0x5D
MPU6050_RA_EXT_SENS_DATA_21 = 0x5E
MPU6050_RA_EXT_SENS_DATA_22 = 0x5F
MPU6050_RA_EXT_SENS_DATA_23 = 0x60
MPU6050_RA_MOT_DETECT_STATUS = 0x61
MPU6050_RA_I2C_SLV0_DO = 0x63
MPU6050_RA_I2C_SLV1_DO = 0x64
MPU6050_RA_I2C_SLV2_DO = 0x65
MPU6050_RA_I2C_SLV3_DO = 0x66
MPU6050_RA_I2C_MST_DELAY_CTRL = 0x67
MPU6050_RA_SIGNAL_PATH_RESET = 0x68
MPU6050_RA_MOT_DETECT_CTRL = 0x69
MPU6050_RA_USER_CTRL = 0x6A
MPU6050_RA_PWR_MGMT_1 = 0x6B
MPU6050_RA_PWR_MGMT_2 = 0x6C
MPU6050_RA_BANK_SEL = 0x6D
MPU6050_RA_MEM_START_ADDR = 0x6E
MPU6050_RA_MEM_R_W = 0x6F
MPU6050_RA_DMP_CFG_1 = 0x70
MPU6050_RA_DMP_CFG_2 = 0x71
MPU6050_RA_FIFO_COUNTH = 0x72
MPU6050_RA_FIFO_COUNTL = 0x73
MPU6050_RA_FIFO_R_W = 0x74
MPU6050_RA_WHO_AM_I = 0x75
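# Hedged illustration (not part of the driver): the *_OUT_H/*_OUT_L pairs above
# hold big-endian signed 16-bit samples, so a raw reading is reassembled as
# (high << 8) | low and then sign-extended. The helper below only shows that
# arithmetic; it does not touch the I2C bus.
def _word_from_bytes(high, low):
    """Combine an xOUT_H/xOUT_L register pair into a signed 16-bit value."""
    value = ((high & 0xFF) << 8) | (low & 0xFF)
    return value - 0x10000 if value & 0x8000 else value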
MPU6050_TC_PWR_MODE_BIT = 7
MPU6050_TC_OFFSET_BIT = 6
MPU6050_TC_OFFSET_LENGTH = 6
MPU6050_TC_OTP_BNK_VLD_BIT = 0
MPU6050_VDDIO_LEVEL_VLOGIC = 0
MPU6050_VDDIO_LEVEL_VDD = 1
MPU6050_CFG_EXT_SYNC_SET_BIT = 5
MPU6050_CFG_EXT_SYNC_SET_LENGTH = 3
MPU6050_CFG_DLPF_CFG_BIT = 2
MPU6050_CFG_DLPF_CFG_LENGTH = 3
MPU6050_EXT_SYNC_DISABLED = 0x0
MPU6050_EXT_SYNC_TEMP_OUT_L = 0x1
MPU6050_EXT_SYNC_GYRO_XOUT_L = 0x2
MPU6050_EXT_SYNC_GYRO_YOUT_L = 0x3
MPU6050_EXT_SYNC_GYRO_ZOUT_L = 0x4
MPU6050_EXT_SYNC_ACCEL_XOUT_L = 0x5
MPU6050_EXT_SYNC_ACCEL_YOUT_L = 0x6
MPU6050_EXT_SYNC_ACCEL_ZOUT_L = 0x7
MPU6050_DLPF_BW_256 = 0x00
MPU6050_DLPF_BW_188 = 0x01
MPU6050_DLPF_BW_98 = 0x02
MPU6050_DLPF_BW_42 = 0x03
MPU6050_DLPF_BW_20 = 0x04
MPU6050_DLPF_BW_10 = 0x05
MPU6050_DLPF_BW_5 = 0x06
MPU6050_GCONFIG_FS_SEL_BIT = 4
MPU6050_GCONFIG_FS_SEL_LENGTH = 2
MPU6050_GYRO_FS_250 = 0x00
MPU6050_GYRO_FS_500 = 0x01
MPU6050_GYRO_FS_1000 = 0x02
MPU6050_GYRO_FS_2000 = 0x03
MPU6050_ACONFIG_XA_ST_BIT = 7
MPU6050_ACONFIG_YA_ST_BIT = 6
MPU6050_ACONFIG_ZA_ST_BIT = 5
MPU6050_ACONFIG_AFS_SEL_BIT = 4
MPU6050_ACONFIG_AFS_SEL_LENGTH = 2
MPU6050_ACONFIG_ACCEL_HPF_BIT = 2
MPU6050_ACONFIG_ACCEL_HPF_LENGTH = 3
MPU6050_ACCEL_FS_2 = 0x00
MPU6050_ACCEL_FS_4 = 0x01
MPU6050_ACCEL_FS_8 = 0x02
MPU6050_ACCEL_FS_16 = 0x03
MPU6050_DHPF_RESET = 0x00
MPU6050_DHPF_5 = 0x01
MPU6050_DHPF_2P5 = 0x02
MPU6050_DHPF_1P25 = 0x03
MPU6050_DHPF_0P63 = 0x04
MPU6050_DHPF_HOLD = 0x07
MPU6050_TEMP_FIFO_EN_BIT = 7
MPU6050_XG_FIFO_EN_BIT = 6
MPU6050_YG_FIFO_EN_BIT = 5
MPU6050_ZG_FIFO_EN_BIT = 4
MPU6050_ACCEL_FIFO_EN_BIT = 3
MPU6050_SLV2_FIFO_EN_BIT = 2
MPU6050_SLV1_FIFO_EN_BIT = 1
MPU6050_SLV0_FIFO_EN_BIT = 0
MPU6050_MULT_MST_EN_BIT = 7
MPU6050_WAIT_FOR_ES_BIT = 6
MPU6050_SLV_3_FIFO_EN_BIT = 5
MPU6050_I2C_MST_P_NSR_BIT = 4
MPU6050_I2C_MST_CLK_BIT = 3
MPU6050_I2C_MST_CLK_LENGTH = 4
MPU6050_CLOCK_DIV_348 = 0x0
MPU6050_CLOCK_DIV_333 = 0x1
MPU6050_CLOCK_DIV_320 = 0x2
MPU6050_CLOCK_DIV_308 = 0x3
MPU6050_CLOCK_DIV_296 = 0x4
MPU6050_CLOCK_DIV_286 = 0x5
MPU6050_CLOCK_DIV_276 = 0x6
MPU6050_CLOCK_DIV_267 = 0x7
MPU6050_CLOCK_DIV_258 = 0x8
MPU6050_CLOCK_DIV_500 = 0x9
MPU6050_CLOCK_DIV_471 = 0xA
MPU6050_CLOCK_DIV_444 = 0xB
MPU6050_CLOCK_DIV_421 = 0xC
MPU6050_CLOCK_DIV_400 = 0xD
MPU6050_CLOCK_DIV_381 = 0xE
MPU6050_CLOCK_DIV_364 = 0xF
MPU6050_I2C_SLV_RW_BIT = 7
MPU6050_I2C_SLV_ADDR_BIT = 6
MPU6050_I2C_SLV_ADDR_LENGTH = 7
MPU6050_I2C_SLV_EN_BIT = 7
MPU6050_I2C_SLV_BYTE_SW_BIT = 6
MPU6050_I2C_SLV_REG_DIS_BIT = 5
MPU6050_I2C_SLV_GRP_BIT = 4
MPU6050_I2C_SLV_LEN_BIT = 3
MPU6050_I2C_SLV_LEN_LENGTH = 4
MPU6050_I2C_SLV4_RW_BIT = 7
MPU6050_I2C_SLV4_ADDR_BIT = 6
MPU6050_I2C_SLV4_ADDR_LENGTH = 7
MPU6050_I2C_SLV4_EN_BIT = 7
MPU6050_I2C_SLV4_INT_EN_BIT = 6
MPU6050_I2C_SLV4_REG_DIS_BIT = 5
MPU6050_I2C_SLV4_MST_DLY_BIT = 4
MPU6050_I2C_SLV4_MST_DLY_LENGTH = 5
MPU6050_MST_PASS_THROUGH_BIT = 7
MPU6050_MST_I2C_SLV4_DONE_BIT = 6
MPU6050_MST_I2C_LOST_ARB_BIT = 5
MPU6050_MST_I2C_SLV4_NACK_BIT = 4
MPU6050_MST_I2C_SLV3_NACK_BIT = 3
MPU6050_MST_I2C_SLV2_NACK_BIT = 2
MPU6050_MST_I2C_SLV1_NACK_BIT = 1
MPU6050_MST_I2C_SLV0_NACK_BIT = 0
MPU6050_INTCFG_INT_LEVEL_BIT = 7
MPU6050_INTCFG_INT_OPEN_BIT = 6
MPU6050_INTCFG_LATCH_INT_EN_BIT = 5
MPU6050_INTCFG_INT_RD_CLEAR_BIT = 4
MPU6050_INTCFG_FSYNC_INT_LEVEL_BIT = 3
MPU6050_INTCFG_FSYNC_INT_EN_BIT = 2
MPU6050_INTCFG_I2C_BYPASS_EN_BIT = 1
MPU6050_INTCFG_CLKOUT_EN_BIT = 0
MPU6050_INTMODE_ACTIVEHIGH = 0x00
MPU6050_INTMODE_ACTIVELOW = 0x01
MPU6050_INTDRV_PUSHPULL = 0x00
MPU6050_INTDRV_OPENDRAIN = 0x01
MPU6050_INTLATCH_50USPULSE = 0x00
MPU6050_INTLATCH_WAITCLEAR = 0x01
MPU6050_INTCLEAR_STATUSREAD = 0x00
MPU6050_INTCLEAR_ANYREAD = 0x01
MPU6050_INTERRUPT_FF_BIT = 7
MPU6050_INTERRUPT_MOT_BIT = 6
MPU6050_INTERRUPT_ZMOT_BIT = 5
MPU6050_INTERRUPT_FIFO_OFLOW_BIT = 4
MPU6050_INTERRUPT_I2C_MST_INT_BIT = 3
MPU6050_INTERRUPT_PLL_RDY_INT_BIT = 2
MPU6050_INTERRUPT_DMP_INT_BIT = 1
MPU6050_INTERRUPT_DATA_RDY_BIT = 0
# TODO: figure out what these actually do
# UMPL source code is not very obvious
MPU6050_DMPINT_5_BIT = 5
MPU6050_DMPINT_4_BIT = 4
MPU6050_DMPINT_3_BIT = 3
MPU6050_DMPINT_2_BIT = 2
MPU6050_DMPINT_1_BIT = 1
MPU6050_DMPINT_0_BIT = 0
MPU6050_MOTION_MOT_XNEG_BIT = 7
MPU6050_MOTION_MOT_XPOS_BIT = 6
MPU6050_MOTION_MOT_YNEG_BIT = 5
MPU6050_MOTION_MOT_YPOS_BIT = 4
MPU6050_MOTION_MOT_ZNEG_BIT = 3
MPU6050_MOTION_MOT_ZPOS_BIT = 2
MPU6050_MOTION_MOT_ZRMOT_BIT = 0
MPU6050_DELAYCTRL_DELAY_ES_SHADOW_BIT = 7
MPU6050_DELAYCTRL_I2C_SLV4_DLY_EN_BIT = 4
MPU6050_DELAYCTRL_I2C_SLV3_DLY_EN_BIT = 3
MPU6050_DELAYCTRL_I2C_SLV2_DLY_EN_BIT = 2
MPU6050_DELAYCTRL_I2C_SLV1_DLY_EN_BIT = 1
MPU6050_DELAYCTRL_I2C_SLV0_DLY_EN_BIT = 0
MPU6050_PATHRESET_GYRO_RESET_BIT = 2
MPU6050_PATHRESET_ACCEL_RESET_BIT = 1
MPU6050_PATHRESET_TEMP_RESET_BIT = 0
MPU6050_DETECT_ACCEL_ON_DELAY_BIT = 5
MPU6050_DETECT_ACCEL_ON_DELAY_LENGTH = 2
MPU6050_DETECT_FF_COUNT_BIT = 3
MPU6050_DETECT_FF_COUNT_LENGTH = 2
MPU6050_DETECT_MOT_COUNT_BIT = 1
MPU6050_DETECT_MOT_COUNT_LENGTH = 2
MPU6050_DETECT_DECREMENT_RESET = 0x0
MPU6050_DETECT_DECREMENT_1 = 0x1
MPU6050_DETECT_DECREMENT_2 = 0x2
MPU6050_DETECT_DECREMENT_4 = 0x3
MPU6050_USERCTRL_DMP_EN_BIT = 7
MPU6050_USERCTRL_FIFO_EN_BIT = 6
MPU6050_USERCTRL_I2C_MST_EN_BIT = 5
MPU6050_USERCTRL_I2C_IF_DIS_BIT = 4
MPU6050_USERCTRL_DMP_RESET_BIT = 3
MPU6050_USERCTRL_FIFO_RESET_BIT = 2
MPU6050_USERCTRL_I2C_MST_RESET_BIT = 1
MPU6050_USERCTRL_SIG_COND_RESET_BIT = 0
MPU6050_PWR1_DEVICE_RESET_BIT = 7
MPU6050_PWR1_SLEEP_BIT = 6
MPU6050_PWR1_CYCLE_BIT = 5
MPU6050_PWR1_TEMP_DIS_BIT = 3
MPU6050_PWR1_CLKSEL_BIT = 2
MPU6050_PWR1_CLKSEL_LENGTH = 3
MPU6050_CLOCK_INTERNAL = 0x00
MPU6050_CLOCK_PLL_XGYRO = 0x01
MPU6050_CLOCK_PLL_YGYRO = 0x02
MPU6050_CLOCK_PLL_ZGYRO = 0x03
MPU6050_CLOCK_PLL_EXT32K = 0x04
MPU6050_CLOCK_PLL_EXT19M = 0x05
MPU6050_CLOCK_KEEP_RESET = 0x07
MPU6050_PWR2_LP_WAKE_CTRL_BIT = 7
MPU6050_PWR2_LP_WAKE_CTRL_LENGTH = 2
MPU6050_PWR2_STBY_XA_BIT = 5
MPU6050_PWR2_STBY_YA_BIT = 4
MPU6050_PWR2_STBY_ZA_BIT = 3
MPU6050_PWR2_STBY_XG_BIT = 2
MPU6050_PWR2_STBY_YG_BIT = 1
MPU6050_PWR2_STBY_ZG_BIT = 0
MPU6050_WAKE_FREQ_1P25 = 0x0
MPU6050_WAKE_FREQ_2P5 = 0x1
MPU6050_WAKE_FREQ_5 = 0x2
MPU6050_WAKE_FREQ_10 = 0x3
MPU6050_BANKSEL_PRFTCH_EN_BIT = 6
MPU6050_BANKSEL_CFG_USER_BANK_BIT = 5
MPU6050_BANKSEL_MEM_SEL_BIT = 4
MPU6050_BANKSEL_MEM_SEL_LENGTH = 5
MPU6050_WHO_AM_I_BIT = 6
MPU6050_WHO_AM_I_LENGTH = 6
# DMP
MPU6050_DMP_MEMORY_BANKS = 8
MPU6050_DMP_MEMORY_BANK_SIZE = 256
MPU6050_DMP_MEMORY_CHUNK_SIZE = 16
MPU6050_DMP_CODE_SIZE = 1929 # dmpMemory[]
MPU6050_DMP_CONFIG_SIZE = 192 # dmpConfig[]
MPU6050_DMP_UPDATES_SIZE = 47 # dmpUpdates[]
# ====================================================================================================
# | Default MotionApps v2.0 42-byte FIFO packet structure:
# |
# |
# |
# | [QUAT W][ ][QUAT X][ ][QUAT Y][ ][QUAT Z][ ][GYRO X][ ][GYRO Y][ ] |
# | 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 |
# | |
# | [GYRO Z][ ][ACC X ][ ][ACC Y ][ ][ACC Z ][ ][ ] |
# | 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 |
# ====================================================================================================
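# Hedged illustration (not part of the driver): per the packet map above, the
# quaternion components sit in bytes 0-1, 4-5, 8-9 and 12-13 of the 42-byte
# DMP FIFO packet as big-endian signed words, conventionally scaled by 2**14.
# The helper name is an assumption; `packet` is a 42-element byte sequence.
def _quaternion_from_packet(packet):
    """Return [w, x, y, z] as floats from one 42-byte DMP FIFO packet."""
    def word(i):
        v = ((packet[i] & 0xFF) << 8) | (packet[i + 1] & 0xFF)
        return v - 0x10000 if v & 0x8000 else v
    return [word(i) / 16384.0 for i in (0, 4, 8, 12)]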
# this block of memory gets written to the MPU on start-up, and it seems
# to be volatile memory, so it has to be done each time (it only takes
# ~1 second though)
dmpMemory = [
# bank 0, 256 bytes
0xFB, 0x00, 0x00, 0x3E, 0x00, 0x0B, 0x00, 0x36, 0x00, 0x01, 0x00, 0x02,
0x00, 0x03, 0x00, 0x00,
0x00, 0x65, 0x00, 0x54, 0xFF, 0xEF, 0x00, 0x00, 0xFA, 0x80, 0x00, 0x0B,
0x12, 0x82, 0x00, 0x01,
0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x28, 0x00, 0x00, 0xFF, 0xFF, 0x45, 0x81, 0xFF, 0xFF, 0xFA, 0x72,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x03, 0xE8, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x7F, 0xFF,
0xFF, 0xFE, 0x80, 0x01,
0x00, 0x1B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x3E, 0x03, 0x30, 0x40, 0x00, 0x00, 0x00, 0x02, 0xCA, 0xE3, 0x09,
0x3E, 0x80, 0x00, 0x00,
0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
0x60, 0x00, 0x00, 0x00,
0x41, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x0B, 0x2A, 0x00, 0x00, 0x16, 0x55,
0x00, 0x00, 0x21, 0x82,
0xFD, 0x87, 0x26, 0x50, 0xFD, 0x80, 0x00, 0x00, 0x00, 0x1F, 0x00, 0x00,
0x00, 0x05, 0x80, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
0x00, 0x03, 0x00, 0x00,
0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x6F, 0x00, 0x02, 0x65, 0x32,
0x00, 0x00, 0x5E, 0xC0,
0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0xFB, 0x8C, 0x6F, 0x5D, 0xFD, 0x5D, 0x08, 0xD9, 0x00, 0x7C, 0x73, 0x3B,
0x00, 0x6C, 0x12, 0xCC,
0x32, 0x00, 0x13, 0x9D, 0x32, 0x00, 0xD0, 0xD6, 0x32, 0x00, 0x08, 0x00,
0x40, 0x00, 0x01, 0xF4,
0xFF, 0xE6, 0x80, 0x79, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD0, 0xD6,
0x00, 0x00, 0x27, 0x10,
# bank 1, 256 bytes
0xFB, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
0x01, 0x00, 0x00, 0x00,
0x00, 0x00, 0xFA, 0x36, 0xFF, 0xBC, 0x30, 0x8E, 0x00, 0x05, 0xFB, 0xF0,
0xFF, 0xD9, 0x5B, 0xC8,
0xFF, 0xD0, 0x9A, 0xBE, 0x00, 0x00, 0x10, 0xA9, 0xFF, 0xF4, 0x1E, 0xB2,
0x00, 0xCE, 0xBB, 0xF7,
0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x02, 0x00, 0x02,
0x02, 0x00, 0x00, 0x0C,
0xFF, 0xC2, 0x80, 0x00, 0x00, 0x01, 0x80, 0x00, 0x00, 0xCF, 0x80, 0x00,
0x40, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00,
0x00, 0x00, 0x00, 0x14,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x03, 0x3F, 0x68, 0xB6, 0x79, 0x35, 0x28, 0xBC,
0xC6, 0x7E, 0xD1, 0x6C,
0x80, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB2, 0x6A,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xF0,
0x00, 0x00, 0x00, 0x30,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x25, 0x4D, 0x00, 0x2F, 0x70, 0x6D, 0x00, 0x00, 0x05, 0xAE,
0x00, 0x0C, 0x02, 0xD0,
# bank 2, 256 bytes
0x00, 0x00, 0x00, 0x00, 0x00, 0x65, 0x00, 0x54, 0xFF, 0xEF, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x01, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x0C, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x65, 0x00, 0x00, 0x00, 0x54, 0x00, 0x00,
0xFF, 0xEF, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x1B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x40, 0x00, 0x00, 0x00,
0x00, 0x1B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
# bank 3, 256 bytes
0xD8, 0xDC, 0xBA, 0xA2, 0xF1, 0xDE, 0xB2, 0xB8, 0xB4, 0xA8, 0x81, 0x91,
0xF7, 0x4A, 0x90, 0x7F,
0x91, 0x6A, 0xF3, 0xF9, 0xDB, 0xA8, 0xF9, 0xB0, 0xBA, 0xA0, 0x80, 0xF2,
0xCE, 0x81, 0xF3, 0xC2,
0xF1, 0xC1, 0xF2, 0xC3, 0xF3, 0xCC, 0xA2, 0xB2, 0x80, 0xF1, 0xC6, 0xD8,
0x80, 0xBA, 0xA7, 0xDF,
0xDF, 0xDF, 0xF2, 0xA7, 0xC3, 0xCB, 0xC5, 0xB6, 0xF0, 0x87, 0xA2, 0x94,
0x24, 0x48, 0x70, 0x3C,
0x95, 0x40, 0x68, 0x34, 0x58, 0x9B, 0x78, 0xA2, 0xF1, 0x83, 0x92, 0x2D,
0x55, 0x7D, 0xD8, 0xB1,
0xB4, 0xB8, 0xA1, 0xD0, 0x91, 0x80, 0xF2, 0x70, 0xF3, 0x70, 0xF2, 0x7C,
0x80, 0xA8, 0xF1, 0x01,
0xB0, 0x98, 0x87, 0xD9, 0x43, 0xD8, 0x86, 0xC9, 0x88, 0xBA, 0xA1, 0xF2,
0x0E, 0xB8, 0x97, 0x80,
0xF1, 0xA9, 0xDF, 0xDF, 0xDF, 0xAA, 0xDF, 0xDF, 0xDF, 0xF2, 0xAA, 0xC5,
0xCD, 0xC7, 0xA9, 0x0C,
0xC9, 0x2C, 0x97, 0x97, 0x97, 0x97, 0xF1, 0xA9, 0x89, 0x26, 0x46, 0x66,
0xB0, 0xB4, 0xBA, 0x80,
0xAC, 0xDE, 0xF2, 0xCA, 0xF1, 0xB2, 0x8C, 0x02, 0xA9, 0xB6, 0x98, 0x00,
0x89, 0x0E, 0x16, 0x1E,
0xB8, 0xA9, 0xB4, 0x99, 0x2C, 0x54, 0x7C, 0xB0, 0x8A, 0xA8, 0x96, 0x36,
0x56, 0x76, 0xF1, 0xB9,
0xAF, 0xB4, 0xB0, 0x83, 0xC0, 0xB8, 0xA8, 0x97, 0x11, 0xB1, 0x8F, 0x98,
0xB9, 0xAF, 0xF0, 0x24,
0x08, 0x44, 0x10, 0x64, 0x18, 0xF1, 0xA3, 0x29, 0x55, 0x7D, 0xAF, 0x83,
0xB5, 0x93, 0xAF, 0xF0,
0x00, 0x28, 0x50, 0xF1, 0xA3, 0x86, 0x9F, 0x61, 0xA6, 0xDA, 0xDE, 0xDF,
0xD9, 0xFA, 0xA3, 0x86,
0x96, 0xDB, 0x31, 0xA6, 0xD9, 0xF8, 0xDF, 0xBA, 0xA6, 0x8F, 0xC2, 0xC5,
0xC7, 0xB2, 0x8C, 0xC1,
0xB8, 0xA2, 0xDF, 0xDF, 0xDF, 0xA3, 0xDF, 0xDF, 0xDF, 0xD8, 0xD8, 0xF1,
0xB8, 0xA8, 0xB2, 0x86,
# bank 4, 256 bytes
0xB4, 0x98, 0x0D, 0x35, 0x5D, 0xB8, 0xAA, 0x98, 0xB0, 0x87, 0x2D, 0x35,
0x3D, 0xB2, 0xB6, 0xBA,
0xAF, 0x8C, 0x96, 0x19, 0x8F, 0x9F, 0xA7, 0x0E, 0x16, 0x1E, 0xB4, 0x9A,
0xB8, 0xAA, 0x87, 0x2C,
0x54, 0x7C, 0xB9, 0xA3, 0xDE, 0xDF, 0xDF, 0xA3, 0xB1, 0x80, 0xF2, 0xC4,
0xCD, 0xC9, 0xF1, 0xB8,
0xA9, 0xB4, 0x99, 0x83, 0x0D, 0x35, 0x5D, 0x89, 0xB9, 0xA3, 0x2D, 0x55,
0x7D, 0xB5, 0x93, 0xA3,
0x0E, 0x16, 0x1E, 0xA9, 0x2C, 0x54, 0x7C, 0xB8, 0xB4, 0xB0, 0xF1, 0x97,
0x83, 0xA8, 0x11, 0x84,
0xA5, 0x09, 0x98, 0xA3, 0x83, 0xF0, 0xDA, 0x24, 0x08, 0x44, 0x10, 0x64,
0x18, 0xD8, 0xF1, 0xA5,
0x29, 0x55, 0x7D, 0xA5, 0x85, 0x95, 0x02, 0x1A, 0x2E, 0x3A, 0x56, 0x5A,
0x40, 0x48, 0xF9, 0xF3,
0xA3, 0xD9, 0xF8, 0xF0, 0x98, 0x83, 0x24, 0x08, 0x44, 0x10, 0x64, 0x18,
0x97, 0x82, 0xA8, 0xF1,
0x11, 0xF0, 0x98, 0xA2, 0x24, 0x08, 0x44, 0x10, 0x64, 0x18, 0xDA, 0xF3,
0xDE, 0xD8, 0x83, 0xA5,
0x94, 0x01, 0xD9, 0xA3, 0x02, 0xF1, 0xA2, 0xC3, 0xC5, 0xC7, 0xD8, 0xF1,
0x84, 0x92, 0xA2, 0x4D,
0xDA, 0x2A, 0xD8, 0x48, 0x69, 0xD9, 0x2A, 0xD8, 0x68, 0x55, 0xDA, 0x32,
0xD8, 0x50, 0x71, 0xD9,
0x32, 0xD8, 0x70, 0x5D, 0xDA, 0x3A, 0xD8, 0x58, 0x79, 0xD9, 0x3A, 0xD8,
0x78, 0x93, 0xA3, 0x4D,
0xDA, 0x2A, 0xD8, 0x48, 0x69, 0xD9, 0x2A, 0xD8, 0x68, 0x55, 0xDA, 0x32,
0xD8, 0x50, 0x71, 0xD9,
0x32, 0xD8, 0x70, 0x5D, 0xDA, 0x3A, 0xD8, 0x58, 0x79, 0xD9, 0x3A, 0xD8,
0x78, 0xA8, 0x8A, 0x9A,
0xF0, 0x28, 0x50, 0x78, 0x9E, 0xF3, 0x88, 0x18, 0xF1, 0x9F, 0x1D, 0x98,
0xA8, 0xD9, 0x08, 0xD8,
0xC8, 0x9F, 0x12, 0x9E, 0xF3, 0x15, 0xA8, 0xDA, 0x12, 0x10, 0xD8, 0xF1,
0xAF, 0xC8, 0x97, 0x87,
# bank 5, 256 bytes
0x34, 0xB5, 0xB9, 0x94, 0xA4, 0x21, 0xF3, 0xD9, 0x22, 0xD8, 0xF2, 0x2D,
0xF3, 0xD9, 0x2A, 0xD8,
0xF2, 0x35, 0xF3, 0xD9, 0x32, 0xD8, 0x81, 0xA4, 0x60, 0x60, 0x61, 0xD9,
0x61, 0xD8, 0x6C, 0x68,
0x69, 0xD9, 0x69, 0xD8, 0x74, 0x70, 0x71, 0xD9, 0x71, 0xD8, 0xB1, 0xA3,
0x84, 0x19, 0x3D, 0x5D,
0xA3, 0x83, 0x1A, 0x3E, 0x5E, 0x93, 0x10, 0x30, 0x81, 0x10, 0x11, 0xB8,
0xB0, 0xAF, 0x8F, 0x94,
0xF2, 0xDA, 0x3E, 0xD8, 0xB4, 0x9A, 0xA8, 0x87, 0x29, 0xDA, 0xF8, 0xD8,
0x87, 0x9A, 0x35, 0xDA,
0xF8, 0xD8, 0x87, 0x9A, 0x3D, 0xDA, 0xF8, 0xD8, 0xB1, 0xB9, 0xA4, 0x98,
0x85, 0x02, 0x2E, 0x56,
0xA5, 0x81, 0x00, 0x0C, 0x14, 0xA3, 0x97, 0xB0, 0x8A, 0xF1, 0x2D, 0xD9,
0x28, 0xD8, 0x4D, 0xD9,
0x48, 0xD8, 0x6D, 0xD9, 0x68, 0xD8, 0xB1, 0x84, 0x0D, 0xDA, 0x0E, 0xD8,
0xA3, 0x29, 0x83, 0xDA,
0x2C, 0x0E, 0xD8, 0xA3, 0x84, 0x49, 0x83, 0xDA, 0x2C, 0x4C, 0x0E, 0xD8,
0xB8, 0xB0, 0xA8, 0x8A,
0x9A, 0xF5, 0x20, 0xAA, 0xDA, 0xDF, 0xD8, 0xA8, 0x40, 0xAA, 0xD0, 0xDA,
0xDE, 0xD8, 0xA8, 0x60,
0xAA, 0xDA, 0xD0, 0xDF, 0xD8, 0xF1, 0x97, 0x86, 0xA8, 0x31, 0x9B, 0x06,
0x99, 0x07, 0xAB, 0x97,
0x28, 0x88, 0x9B, 0xF0, 0x0C, 0x20, 0x14, 0x40, 0xB8, 0xB0, 0xB4, 0xA8,
0x8C, 0x9C, 0xF0, 0x04,
0x28, 0x51, 0x79, 0x1D, 0x30, 0x14, 0x38, 0xB2, 0x82, 0xAB, 0xD0, 0x98,
0x2C, 0x50, 0x50, 0x78,
0x78, 0x9B, 0xF1, 0x1A, 0xB0, 0xF0, 0x8A, 0x9C, 0xA8, 0x29, 0x51, 0x79,
0x8B, 0x29, 0x51, 0x79,
0x8A, 0x24, 0x70, 0x59, 0x8B, 0x20, 0x58, 0x71, 0x8A, 0x44, 0x69, 0x38,
0x8B, 0x39, 0x40, 0x68,
0x8A, 0x64, 0x48, 0x31, 0x8B, 0x30, 0x49, 0x60, 0xA5, 0x88, 0x20, 0x09,
0x71, 0x58, 0x44, 0x68,
# bank 6, 256 bytes
0x11, 0x39, 0x64, 0x49, 0x30, 0x19, 0xF1, 0xAC, 0x00, 0x2C, 0x54, 0x7C,
0xF0, 0x8C, 0xA8, 0x04,
0x28, 0x50, 0x78, 0xF1, 0x88, 0x97, 0x26, 0xA8, 0x59, 0x98, 0xAC, 0x8C,
0x02, 0x26, 0x46, 0x66,
0xF0, 0x89, 0x9C, 0xA8, 0x29, 0x51, 0x79, 0x24, 0x70, 0x59, 0x44, 0x69,
0x38, 0x64, 0x48, 0x31,
0xA9, 0x88, 0x09, 0x20, 0x59, 0x70, 0xAB, 0x11, 0x38, 0x40, 0x69, 0xA8,
0x19, 0x31, 0x48, 0x60,
0x8C, 0xA8, 0x3C, 0x41, 0x5C, 0x20, 0x7C, 0x00, 0xF1, 0x87, 0x98, 0x19,
0x86, 0xA8, 0x6E, 0x76,
0x7E, 0xA9, 0x99, 0x88, 0x2D, 0x55, 0x7D, 0x9E, 0xB9, 0xA3, 0x8A, 0x22,
0x8A, 0x6E, 0x8A, 0x56,
0x8A, 0x5E, 0x9F, 0xB1, 0x83, 0x06, 0x26, 0x46, 0x66, 0x0E, 0x2E, 0x4E,
0x6E, 0x9D, 0xB8, 0xAD,
0x00, 0x2C, 0x54, 0x7C, 0xF2, 0xB1, 0x8C, 0xB4, 0x99, 0xB9, 0xA3, 0x2D,
0x55, 0x7D, 0x81, 0x91,
0xAC, 0x38, 0xAD, 0x3A, 0xB5, 0x83, 0x91, 0xAC, 0x2D, 0xD9, 0x28, 0xD8,
0x4D, 0xD9, 0x48, 0xD8,
0x6D, 0xD9, 0x68, 0xD8, 0x8C, 0x9D, 0xAE, 0x29, 0xD9, 0x04, 0xAE, 0xD8,
0x51, 0xD9, 0x04, 0xAE,
0xD8, 0x79, 0xD9, 0x04, 0xD8, 0x81, 0xF3, 0x9D, 0xAD, 0x00, 0x8D, 0xAE,
0x19, 0x81, 0xAD, 0xD9,
0x01, 0xD8, 0xF2, 0xAE, 0xDA, 0x26, 0xD8, 0x8E, 0x91, 0x29, 0x83, 0xA7,
0xD9, 0xAD, 0xAD, 0xAD,
0xAD, 0xF3, 0x2A, 0xD8, 0xD8, 0xF1, 0xB0, 0xAC, 0x89, 0x91, 0x3E, 0x5E,
0x76, 0xF3, 0xAC, 0x2E,
0x2E, 0xF1, 0xB1, 0x8C, 0x5A, 0x9C, 0xAC, 0x2C, 0x28, 0x28, 0x28, 0x9C,
0xAC, 0x30, 0x18, 0xA8,
0x98, 0x81, 0x28, 0x34, 0x3C, 0x97, 0x24, 0xA7, 0x28, 0x34, 0x3C, 0x9C,
0x24, 0xF2, 0xB0, 0x89,
0xAC, 0x91, 0x2C, 0x4C, 0x6C, 0x8A, 0x9B, 0x2D, 0xD9, 0xD8, 0xD8, 0x51,
0xD9, 0xD8, 0xD8, 0x79,
# bank 7, 138 bytes (remainder)
0xD9, 0xD8, 0xD8, 0xF1, 0x9E, 0x88, 0xA3, 0x31, 0xDA, 0xD8, 0xD8, 0x91,
0x2D, 0xD9, 0x28, 0xD8,
0x4D, 0xD9, 0x48, 0xD8, 0x6D, 0xD9, 0x68, 0xD8, 0xB1, 0x83, 0x93, 0x35,
0x3D, 0x80, 0x25, 0xDA,
0xD8, 0xD8, 0x85, 0x69, 0xDA, 0xD8, 0xD8, 0xB4, 0x93, 0x81, 0xA3, 0x28,
0x34, 0x3C, 0xF3, 0xAB,
0x8B, 0xF8, 0xA3, 0x91, 0xB6, 0x09, 0xB4, 0xD9, 0xAB, 0xDE, 0xFA, 0xB0,
0x87, 0x9C, 0xB9, 0xA3,
0xDD, 0xF1, 0xA3, 0xA3, 0xA3, 0xA3, 0x95, 0xF1, 0xA3, 0xA3, 0xA3, 0x9D,
0xF1, 0xA3, 0xA3, 0xA3,
0xA3, 0xF2, 0xA3, 0xB4, 0x90, 0x80, 0xF2, 0xA3, 0xA3, 0xA3, 0xA3, 0xA3,
0xA3, 0xA3, 0xA3, 0xA3,
0xA3, 0xB2, 0xA3, 0xA3, 0xA3, 0xA3, 0xA3, 0xA3, 0xB0, 0x87, 0xB5, 0x99,
0xF1, 0xA3, 0xA3, 0xA3,
0x98, 0xF1, 0xA3, 0xA3, 0xA3, 0xA3, 0x97, 0xA3, 0xA3, 0xA3, 0xA3, 0xF3,
0x9B, 0xA3, 0xA3, 0xDC,
0xB9, 0xA7, 0xF1, 0x26, 0x26, 0x26, 0xD8, 0xD8, 0xFF]
dmpConfig = [
# BANK OFFSET LENGTH [DATA]
0x03, 0x7B, 0x03, 0x4C, 0xCD, 0x6C, # FCFG_1 inv_set_gyro_calibration
0x03, 0xAB, 0x03, 0x36, 0x56, 0x76, # FCFG_3 inv_set_gyro_calibration
0x00, 0x68, 0x04, 0x02, 0xCB, 0x47, 0xA2,
# D_0_104 inv_set_gyro_calibration
0x02, 0x18, 0x04, 0x00, 0x05, 0x8B, 0xC1,
# D_0_24 inv_set_gyro_calibration
0x01, 0x0C, 0x04, 0x00, 0x00, 0x00, 0x00,
# D_1_152 inv_set_accel_calibration
0x03, 0x7F, 0x06, 0x0C, 0xC9, 0x2C, 0x97, 0x97, 0x97,
# FCFG_2 inv_set_accel_calibration
0x03, 0x89, 0x03, 0x26, 0x46, 0x66, # FCFG_7 inv_set_accel_calibration
0x00, 0x6C, 0x02, 0x20, 0x00, # D_0_108 inv_set_accel_calibration
0x02, 0x40, 0x04, 0x00, 0x00, 0x00, 0x00,
# CPASS_MTX_00 inv_set_compass_calibration
0x02, 0x44, 0x04, 0x00, 0x00, 0x00, 0x00, # CPASS_MTX_01
0x02, 0x48, 0x04, 0x00, 0x00, 0x00, 0x00, # CPASS_MTX_02
0x02, 0x4C, 0x04, 0x00, 0x00, 0x00, 0x00, # CPASS_MTX_10
0x02, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, # CPASS_MTX_11
0x02, 0x54, 0x04, 0x00, 0x00, 0x00, 0x00, # CPASS_MTX_12
0x02, 0x58, 0x04, 0x00, 0x00, 0x00, 0x00, # CPASS_MTX_20
0x02, 0x5C, 0x04, 0x00, 0x00, 0x00, 0x00, # CPASS_MTX_21
0x02, 0xBC, 0x04, 0x00, 0x00, 0x00, 0x00, # CPASS_MTX_22
0x01, 0xEC, 0x04, 0x00, 0x00, 0x40, 0x00,
# D_1_236 inv_apply_endian_accel
0x03, 0x7F, 0x06, 0x0C, 0xC9, 0x2C, 0x97, 0x97, 0x97,
# FCFG_2 inv_set_mpu_sensors
0x04, 0x02, 0x03, 0x0D, 0x35, 0x5D,
# CFG_MOTION_BIAS inv_turn_on_bias_from_no_motion
0x04, 0x09, 0x04, 0x87, 0x2D, 0x35, 0x3D, # FCFG_5 inv_set_bias_update
0x00, 0xA3, 0x01, 0x00, # D_0_163 inv_set_dead_zone
# SPECIAL 0x01 = enable interrupts
0x00, 0x00, 0x00, 0x01, # SET INT_ENABLE at i=22, SPECIAL INSTRUCTION
0x07, 0x86, 0x01, 0xFE, # CFG_6 inv_set_fifo_interupt
0x07, 0x41, 0x05, 0xF1, 0x20, 0x28, 0x30, 0x38,
# CFG_8 inv_send_quaternion
0x07, 0x7E, 0x01, 0x30, # CFG_16 inv_set_footer
0x07, 0x46, 0x01, 0x9A, # CFG_GYRO_SOURCE inv_send_gyro
0x07, 0x47, 0x04, 0xF1, 0x28, 0x30, 0x38,
# CFG_9 inv_send_gyro -> inv_construct3_fifo
0x07, 0x6C, 0x04, 0xF1, 0x28, 0x30, 0x38,
# CFG_12 inv_send_accel -> inv_construct3_fifo
0x02, 0x16, 0x02, 0x00, 0x05 # D_0_22 inv_set_fifo_rate
# The very last byte above (here 0x05) sets the FIFO rate divider; in the
# original library it was 0x09, which drops the FIFO rate down to 20 Hz.
# 0x07 is 25 Hz, 0x01 is 100 Hz. Going faster than 100 Hz (0x00 = 200 Hz)
# tends to result in very noisy data.
# DMP output frequency is calculated easily using this equation:
# (200 Hz / (1 + value))
# It is important to make sure the host processor can keep up with
# reading and processing the FIFO output at the desired rate.
# Handling FIFO overflow cleanly is also a good idea.
]
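# Note (added, illustrative): the 200 Hz base rate in the formula above comes
# from the sample rate configured in dmpInitialize() below (setRate(4), i.e.
# 1 kHz / (1 + 4) = 200 Hz). With the divider 0x05 shipped in this table the
# DMP output rate works out to 200 / (1 + 5) ~= 33 Hz.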
dmpUpdates = [
0x01, 0xB2, 0x02, 0xFF, 0xFF,
0x01, 0x90, 0x04, 0x09, 0x23, 0xA1, 0x35,
0x01, 0x6A, 0x02, 0x06, 0x00,
0x01, 0x60, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x60, 0x04, 0x40, 0x00, 0x00, 0x00,
0x01, 0x62, 0x02, 0x00, 0x00,
0x00, 0x60, 0x04, 0x00, 0x40, 0x00, 0x00]
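# Note (added): as far as can be inferred from dmpInitialize() below, each
# record in dmpUpdates follows the same [bank] [offset] [length] [data...]
# layout as dmpConfig. For example the first record
# 0x01, 0xB2, 0x02, 0xFF, 0xFF writes the two bytes 0xFF 0xFF to bank 1,
# offset 0xB2.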
# Setting up internal 42-byte (default) DMP packet buffer
dmpPacketSize = 42
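# Note (added): byte offsets within the 42-byte packet, as far as they can
# be inferred from the dmpGet* helpers further down: 16-bit big-endian
# quaternion components w/x/y/z at bytes 0, 4, 8 and 12 (divide by 16384.0
# for Q14 scaling) and 16-bit accel X/Y/Z at bytes 28, 32 and 36. The
# remaining bytes are not decoded by the helpers in this file.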
# construct a new object with the I2C address of the MPU6050
def __init__(self, address=MPU6050_DEFAULT_ADDRESS, bus=None):
self.i2c = PyComms(address, bus)
self.address = address
def initialize(self):
self.setClockSource(self.MPU6050_CLOCK_PLL_XGYRO)
self.setFullScaleGyroRange(self.MPU6050_GYRO_FS_250)
self.setFullScaleAccelRange(self.MPU6050_ACCEL_FS_2)
self.setSleepEnabled(False)
def testConnection(self):
return self.getDeviceID() == 0x34
def getAuxVDDIOLevel(self):
return self.i2c.readBit(self.MPU6050_RA_YG_OFFS_TC,
self.MPU6050_TC_PWR_MODE_BIT)
def setAuxVDDIOLevel(self, level):
self.i2c.writeBit(self.MPU6050_RA_YG_OFFS_TC,
self.MPU6050_TC_PWR_MODE_BIT, level)
def getRate(self):
return self.i2c.readU8(self.MPU6050_RA_SMPLRT_DIV)
def setRate(self, value):
self.i2c.write8(self.MPU6050_RA_SMPLRT_DIV, value)
def getExternalFrameSync(self):
return self.i2c.readBits(self.MPU6050_RA_CONFIG,
self.MPU6050_CFG_EXT_SYNC_SET_BIT,
self.MPU6050_CFG_EXT_SYNC_SET_LENGTH)
def setExternalFrameSync(self, sync):
self.i2c.writeBits(self.MPU6050_RA_CONFIG,
self.MPU6050_CFG_EXT_SYNC_SET_BIT,
self.MPU6050_CFG_EXT_SYNC_SET_LENGTH, sync)
def getDLPFMode(self):
return self.i2c.readBits(self.MPU6050_RA_CONFIG,
self.MPU6050_CFG_DLPF_CFG_BIT,
self.MPU6050_CFG_DLPF_CFG_LENGTH)
def setDLPFMode(self, mode):
self.i2c.writeBits(self.MPU6050_RA_CONFIG,
self.MPU6050_CFG_DLPF_CFG_BIT,
self.MPU6050_CFG_DLPF_CFG_LENGTH, mode)
def getFullScaleGyroRange(self):
return self.i2c.readBits(self.MPU6050_RA_GYRO_CONFIG,
self.MPU6050_GCONFIG_FS_SEL_BIT,
self.MPU6050_GCONFIG_FS_SEL_LENGTH)
def setFullScaleGyroRange(self, range):
self.i2c.writeBits(self.MPU6050_RA_GYRO_CONFIG,
self.MPU6050_GCONFIG_FS_SEL_BIT,
self.MPU6050_GCONFIG_FS_SEL_LENGTH, range)
def getAccelXSelfTest(self):
return self.i2c.readBit(self.MPU6050_RA_ACCEL_CONFIG,
self.MPU6050_ACONFIG_XA_ST_BIT)
def setAccelXSelfTest(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_ACCEL_CONFIG,
self.MPU6050_ACONFIG_XA_ST_BIT, enabled)
def getAccelYSelfTest(self):
return self.i2c.readBit(self.MPU6050_RA_ACCEL_CONFIG,
self.MPU6050_ACONFIG_YA_ST_BIT)
def setAccelYSelfTest(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_ACCEL_CONFIG,
self.MPU6050_ACONFIG_YA_ST_BIT, enabled)
def getAccelZSelfTest(self):
return self.i2c.readBit(self.MPU6050_RA_ACCEL_CONFIG,
self.MPU6050_ACONFIG_ZA_ST_BIT)
def setAccelZSelfTest(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_ACCEL_CONFIG,
self.MPU6050_ACONFIG_ZA_ST_BIT, enabled)
def getFullScaleAccelRange(self):
return self.i2c.readBits(self.MPU6050_RA_ACCEL_CONFIG,
self.MPU6050_ACONFIG_AFS_SEL_BIT,
self.MPU6050_ACONFIG_AFS_SEL_LENGTH)
def setFullScaleAccelRange(self, value):
self.i2c.writeBits(self.MPU6050_RA_ACCEL_CONFIG,
self.MPU6050_ACONFIG_AFS_SEL_BIT,
self.MPU6050_ACONFIG_AFS_SEL_LENGTH, value)
def getDHPFMode(self):
return self.i2c.readBits(self.MPU6050_RA_ACCEL_CONFIG,
self.MPU6050_ACONFIG_ACCEL_HPF_BIT,
self.MPU6050_ACONFIG_ACCEL_HPF_LENGTH)
def setDHPFMode(self, bandwidth):
self.i2c.writeBits(self.MPU6050_RA_ACCEL_CONFIG,
self.MPU6050_ACONFIG_ACCEL_HPF_BIT,
self.MPU6050_ACONFIG_ACCEL_HPF_LENGTH, bandwidth)
def getFreefallDetectionThreshold(self):
return self.i2c.readU8(self.MPU6050_RA_FF_THR)
def setFreefallDetectionThreshold(self, threshold):
self.i2c.write8(self.MPU6050_RA_FF_THR, threshold)
def getFreefallDetectionDuration(self):
return self.i2c.readU8(self.MPU6050_RA_FF_DUR)
def setFreefallDetectionDuration(self, duration):
self.i2c.write8(self.MPU6050_RA_FF_DUR, duration)
def getMotionDetectionThreshold(self):
return self.i2c.readU8(self.MPU6050_RA_MOT_THR)
def setMotionDetectionThreshold(self, threshold):
self.i2c.write8(self.MPU6050_RA_MOT_THR, threshold)
def getMotionDetectionDuration(self):
return self.i2c.readU8(self.MPU6050_RA_MOT_DUR)
def setMotionDetectionDuration(self, duration):
self.i2c.write8(self.MPU6050_RA_MOT_DUR, duration)
def getZeroMotionDetectionThreshold(self):
return self.i2c.readU8(self.MPU6050_RA_ZRMOT_THR)
def setZeroMotionDetectionThreshold(self, threshold):
self.i2c.write8(self.MPU6050_RA_ZRMOT_THR, threshold)
def getZeroMotionDetectionDuration(self):
return self.i2c.readU8(self.MPU6050_RA_ZRMOT_DUR)
def setZeroMotionDetectionDuration(self, duration):
self.i2c.write8(self.MPU6050_RA_ZRMOT_DUR, duration)
def getTempFIFOEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_FIFO_EN,
self.MPU6050_TEMP_FIFO_EN_BIT)
def setTempFIFOEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_FIFO_EN, self.MPU6050_TEMP_FIFO_EN_BIT,
enabled)
def getXGyroFIFOEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_FIFO_EN,
self.MPU6050_XG_FIFO_EN_BIT)
def setXGyroFIFOEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_FIFO_EN, self.MPU6050_XG_FIFO_EN_BIT,
enabled)
def getYGyroFIFOEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_FIFO_EN,
self.MPU6050_YG_FIFO_EN_BIT)
def setYGyroFIFOEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_FIFO_EN, self.MPU6050_YG_FIFO_EN_BIT,
enabled)
def getZGyroFIFOEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_FIFO_EN,
self.MPU6050_ZG_FIFO_EN_BIT)
def setZGyroFIFOEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_FIFO_EN, self.MPU6050_ZG_FIFO_EN_BIT,
enabled)
def getAccelFIFOEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_FIFO_EN,
self.MPU6050_ACCEL_FIFO_EN_BIT)
def setAccelFIFOEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_FIFO_EN,
self.MPU6050_ACCEL_FIFO_EN_BIT, enabled)
def getSlave2FIFOEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_FIFO_EN,
self.MPU6050_SLV2_FIFO_EN_BIT)
def setSlave2FIFOEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_FIFO_EN,
self.MPU6050_SLV2_FIFO_EN_BIT, enabled)
def getSlave1FIFOEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_FIFO_EN,
self.MPU6050_SLV1_FIFO_EN_BIT)
def setSlave1FIFOEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_FIFO_EN,
self.MPU6050_SLV1_FIFO_EN_BIT, enabled)
def getSlave0FIFOEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_FIFO_EN,
self.MPU6050_SLV0_FIFO_EN_BIT)
def setSlave0FIFOEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_FIFO_EN,
self.MPU6050_SLV0_FIFO_EN_BIT, enabled)
def getMultiMasterEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_I2C_MST_CTRL,
self.MPU6050_MULT_MST_EN_BIT)
def setMultiMasterEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_I2C_MST_CTRL,
self.MPU6050_MULT_MST_EN_BIT, enabled)
def getWaitForExternalSensorEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_I2C_MST_CTRL,
self.MPU6050_WAIT_FOR_ES_BIT)
def setWaitForExternalSensorEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_I2C_MST_CTRL,
self.MPU6050_WAIT_FOR_ES_BIT, enabled)
def getSlave3FIFOEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_I2C_MST_CTRL,
self.MPU6050_SLV_3_FIFO_EN_BIT)
def setSlave3FIFOEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_I2C_MST_CTRL,
self.MPU6050_SLV_3_FIFO_EN_BIT, enabled)
def getSlaveReadWriteTransitionEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_I2C_MST_CTRL,
self.MPU6050_I2C_MST_P_NSR_BIT)
def setSlaveReadWriteTransitionEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_I2C_MST_CTRL,
self.MPU6050_I2C_MST_P_NSR_BIT, enabled)
def getMasterClockSpeed(self):
return self.i2c.readBits(self.MPU6050_RA_I2C_MST_CTRL,
self.MPU6050_I2C_MST_CLK_BIT,
self.MPU6050_I2C_MST_CLK_LENGTH)
def setMasterClockSpeed(self, speed):
self.i2c.writeBits(self.MPU6050_RA_I2C_MST_CTRL,
self.MPU6050_I2C_MST_CLK_BIT,
self.MPU6050_I2C_MST_CLK_LENGTH, speed)
def getSlaveAddress(self, num):
if num > 3:
return 0
return self.i2c.readU8(self.MPU6050_RA_I2C_SLV0_ADDR + num * 3)
def setSlaveAddress(self, num, address):
if num > 3:
return
self.i2c.write8(self.MPU6050_RA_I2C_SLV0_ADDR + num * 3, address)
def getSlaveRegister(self, num):
if num > 3:
return 0
return self.i2c.readU8(self.MPU6050_RA_I2C_SLV0_REG + num * 3)
def setSlaveRegister(self, num, reg):
if num > 3:
return
self.i2c.write8(self.MPU6050_RA_I2C_SLV0_REG + num * 3, reg)
def getSlaveEnabled(self, num):
return self.i2c.readBit(self.MPU6050_RA_I2C_SLV0_CTRL + num * 3,
self.MPU6050_I2C_SLV_EN_BIT)
def setSlaveEnabled(self, num, enabled):
if num > 3:
return
self.i2c.writeBit(self.MPU6050_RA_I2C_SLV0_CTRL + num * 3,
self.MPU6050_I2C_SLV_EN_BIT, enabled)
def getSlaveWordByteSwap(self, num):
if num > 3:
return 0
return self.i2c.readBit(self.MPU6050_RA_I2C_SLV0_CTRL + num * 3,
self.MPU6050_I2C_SLV_BYTE_SW_BIT)
def setSlaveWordByteSwap(self, num, enabled):
if num > 3:
return
self.i2c.writeBit(self.MPU6050_RA_I2C_SLV0_CTRL + num * 3,
self.MPU6050_I2C_SLV_BYTE_SW_BIT, enabled)
def getSlaveWriteMode(self, num):
if num > 3:
return 0
return self.i2c.readBit(self.MPU6050_RA_I2C_SLV0_CTRL + num * 3,
self.MPU6050_I2C_SLV_REG_DIS_BIT)
def setSlaveWriteMode(self, num, mode):
if num > 3:
return
self.i2c.writeBit(self.MPU6050_RA_I2C_SLV0_CTRL + num * 3,
self.MPU6050_I2C_SLV_REG_DIS_BIT, mode)
def getSlaveWordGroupOffset(self, num):
if num > 3:
return 0
return self.i2c.readBit(self.MPU6050_RA_I2C_SLV0_CTRL + num * 3,
self.MPU6050_I2C_SLV_GRP_BIT)
def setSlaveWordGroupOffset(self, num, enabled):
if num > 3:
return
self.i2c.writeBit(self.MPU6050_RA_I2C_SLV0_CTRL + num * 3,
self.MPU6050_I2C_SLV_GRP_BIT, enabled)
def getSlaveDataLength(self, num):
if num > 3:
return 0
return self.i2c.readBits(self.MPU6050_RA_I2C_SLV0_CTRL + num * 3,
self.MPU6050_I2C_SLV_LEN_BIT,
self.MPU6050_I2C_SLV_LEN_LENGTH)
def setSlaveDataLength(self, num, length):
if num > 3:
return
self.i2c.writeBits(self.MPU6050_RA_I2C_SLV0_CTRL + num * 3,
self.MPU6050_I2C_SLV_LEN_BIT,
self.MPU6050_I2C_SLV_LEN_LENGTH, length)
def getSlave4Address(self):
return self.i2c.readU8(self.MPU6050_RA_I2C_SLV4_ADDR)
def setSlave4Address(self, address):
self.i2c.write8(self.MPU6050_RA_I2C_SLV4_ADDR, address)
def getSlave4Register(self):
return self.i2c.readU8(self.MPU6050_RA_I2C_SLV4_REG)
def setSlave4Register(self, reg):
self.i2c.write8(self.MPU6050_RA_I2C_SLV4_REG, reg)
def setSlave4OutputByte(self, data):
self.i2c.write8(self.MPU6050_RA_I2C_SLV4_DO, data)
def getSlave4Enabled(self):
return self.i2c.readBit(self.MPU6050_RA_I2C_SLV4_CTRL,
self.MPU6050_I2C_SLV4_EN_BIT)
def setSlave4Enabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_I2C_SLV4_CTRL,
self.MPU6050_I2C_SLV4_EN_BIT, enabled)
def getSlave4InterruptEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_I2C_SLV4_CTRL,
self.MPU6050_I2C_SLV4_INT_EN_BIT)
def setSlave4InterruptEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_I2C_SLV4_CTRL,
self.MPU6050_I2C_SLV4_INT_EN_BIT, enabled)
def getSlave4WriteMode(self):
return self.i2c.readBit(self.MPU6050_RA_I2C_SLV4_CTRL,
self.MPU6050_I2C_SLV4_REG_DIS_BIT)
def setSlave4WriteMode(self, mode):
self.i2c.writeBit(self.MPU6050_RA_I2C_SLV4_CTRL,
self.MPU6050_I2C_SLV4_REG_DIS_BIT, mode)
def getSlave4MasterDelay(self):
return self.i2c.readBits(self.MPU6050_RA_I2C_SLV4_CTRL,
self.MPU6050_I2C_SLV4_MST_DLY_BIT,
self.MPU6050_I2C_SLV4_MST_DLY_LENGTH)
def setSlave4MasterDelay(self, delay):
self.i2c.writeBits(self.MPU6050_RA_I2C_SLV4_CTRL,
self.MPU6050_I2C_SLV4_MST_DLY_BIT,
self.MPU6050_I2C_SLV4_MST_DLY_LENGTH, delay)
def getSlave4InputByte(self):
return self.i2c.readU8(self.MPU6050_RA_I2C_SLV4_DI)
def getPassthroughStatus(self):
return self.i2c.readBit(self.MPU6050_RA_I2C_MST_STATUS,
self.MPU6050_MST_PASS_THROUGH_BIT)
def getSlave4IsDone(self):
return self.i2c.readBit(self.MPU6050_RA_I2C_MST_STATUS,
self.MPU6050_MST_I2C_SLV4_DONE_BIT)
def getLostArbitration(self):
return self.i2c.readBit(self.MPU6050_RA_I2C_MST_STATUS,
self.MPU6050_MST_I2C_LOST_ARB_BIT)
def getSlave4Nack(self):
return self.i2c.readBit(self.MPU6050_RA_I2C_MST_STATUS,
self.MPU6050_MST_I2C_SLV4_NACK_BIT)
def getSlave3Nack(self):
return self.i2c.readBit(self.MPU6050_RA_I2C_MST_STATUS,
self.MPU6050_MST_I2C_SLV3_NACK_BIT)
def getSlave2Nack(self):
return self.i2c.readBit(self.MPU6050_RA_I2C_MST_STATUS,
self.MPU6050_MST_I2C_SLV2_NACK_BIT)
def getSlave1Nack(self):
return self.i2c.readBit(self.MPU6050_RA_I2C_MST_STATUS,
self.MPU6050_MST_I2C_SLV1_NACK_BIT)
def getSlave0Nack(self):
return self.i2c.readBit(self.MPU6050_RA_I2C_MST_STATUS,
self.MPU6050_MST_I2C_SLV0_NACK_BIT)
def getInterruptMode(self):
return self.i2c.readBit(self.MPU6050_RA_INT_PIN_CFG,
self.MPU6050_INTCFG_INT_LEVEL_BIT)
def setInterruptMode(self, mode):
self.i2c.writeBit(self.MPU6050_RA_INT_PIN_CFG,
self.MPU6050_INTCFG_INT_LEVEL_BIT, mode)
def getInterruptDrive(self):
return self.i2c.readBit(self.MPU6050_RA_INT_PIN_CFG,
self.MPU6050_INTCFG_INT_OPEN_BIT)
def setInterruptDrive(self, drive):
self.i2c.writeBit(self.MPU6050_RA_INT_PIN_CFG,
self.MPU6050_INTCFG_INT_OPEN_BIT, drive)
def getInterruptLatch(self):
return self.i2c.readBit(self.MPU6050_RA_INT_PIN_CFG,
self.MPU6050_INTCFG_LATCH_INT_EN_BIT)
def setInterruptLatch(self, latch):
self.i2c.writeBit(self.MPU6050_RA_INT_PIN_CFG,
self.MPU6050_INTCFG_LATCH_INT_EN_BIT, latch)
def getInterruptLatchClear(self):
return self.i2c.readBit(self.MPU6050_RA_INT_PIN_CFG,
self.MPU6050_INTCFG_INT_RD_CLEAR_BIT)
def setInterruptLatchClear(self, clear):
self.i2c.writeBit(self.MPU6050_RA_INT_PIN_CFG,
self.MPU6050_INTCFG_INT_RD_CLEAR_BIT, clear)
def getFSyncInterruptLevel(self):
return self.i2c.readBit(self.MPU6050_RA_INT_PIN_CFG,
self.MPU6050_INTCFG_FSYNC_INT_LEVEL_BIT)
def setFSyncInterruptLevel(self, level):
self.i2c.writeBit(self.MPU6050_RA_INT_PIN_CFG,
self.MPU6050_INTCFG_FSYNC_INT_LEVEL_BIT, level)
def getFSyncInterruptEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_INT_PIN_CFG,
self.MPU6050_INTCFG_FSYNC_INT_EN_BIT)
def setFSyncInterruptEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_INT_PIN_CFG,
self.MPU6050_INTCFG_FSYNC_INT_EN_BIT, enabled)
def getI2CBypassEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_INT_PIN_CFG,
self.MPU6050_INTCFG_I2C_BYPASS_EN_BIT)
def setI2CBypassEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_INT_PIN_CFG,
self.MPU6050_INTCFG_I2C_BYPASS_EN_BIT, enabled)
def getClockOutputEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_INT_PIN_CFG,
self.MPU6050_INTCFG_CLKOUT_EN_BIT)
def setClockOutputEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_INT_PIN_CFG,
self.MPU6050_INTCFG_CLKOUT_EN_BIT, enabled)
def getIntEnabled(self):
return self.i2c.readU8(self.MPU6050_RA_INT_ENABLE)
def setIntEnabled(self, status):
self.i2c.write8(self.MPU6050_RA_INT_ENABLE, status)
def getIntFreefallEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_INT_ENABLE,
self.MPU6050_INTERRUPT_FF_BIT)
def setIntFreefallEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_INT_ENABLE,
self.MPU6050_INTERRUPT_FF_BIT, enabled)
def getIntMotionEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_INT_ENABLE,
self.MPU6050_INTERRUPT_MOT_BIT)
def setIntMotionEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_INT_ENABLE,
self.MPU6050_INTERRUPT_MOT_BIT, enabled)
def getIntZeroMotionEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_INT_ENABLE,
self.MPU6050_INTERRUPT_ZMOT_BIT)
def setIntZeroMotionEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_INT_ENABLE,
self.MPU6050_INTERRUPT_ZMOT_BIT, enabled)
def getIntFIFOBufferOverflowEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_INT_ENABLE,
self.MPU6050_INTERRUPT_FIFO_OFLOW_BIT)
def setIntFIFOBufferOverflowEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_INT_ENABLE,
self.MPU6050_INTERRUPT_FIFO_OFLOW_BIT, enabled)
def getIntI2CMasterEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_INT_ENABLE,
self.MPU6050_INTERRUPT_I2C_MST_INT_BIT)
def setIntI2CMasterEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_INT_ENABLE,
self.MPU6050_INTERRUPT_I2C_MST_INT_BIT, enabled)
def getIntDataReadyEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_INT_ENABLE,
self.MPU6050_INTERRUPT_DATA_RDY_BIT)
def setIntDataReadyEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_INT_ENABLE,
self.MPU6050_INTERRUPT_DATA_RDY_BIT, enabled)
def getIntStatus(self):
return self.i2c.readU8(self.MPU6050_RA_INT_STATUS)
def getIntFreefallStatus(self):
return self.i2c.readBit(self.MPU6050_RA_INT_STATUS,
self.MPU6050_INTERRUPT_FF_BIT)
def getIntMotionStatus(self):
return self.i2c.readBit(self.MPU6050_RA_INT_STATUS,
self.MPU6050_INTERRUPT_MOT_BIT)
def getIntZeroMotionStatus(self):
return self.i2c.readBit(self.MPU6050_RA_INT_STATUS,
self.MPU6050_INTERRUPT_ZMOT_BIT)
def getIntFIFOBufferOverflowStatus(self):
return self.i2c.readBit(self.MPU6050_RA_INT_STATUS,
self.MPU6050_INTERRUPT_FIFO_OFLOW_BIT)
def getIntI2CMasterStatus(self):
return self.i2c.readBit(self.MPU6050_RA_INT_STATUS,
self.MPU6050_INTERRUPT_I2C_MST_INT_BIT)
def getIntDataReadyStatus(self):
return self.i2c.readBit(self.MPU6050_RA_INT_STATUS,
self.MPU6050_INTERRUPT_DATA_RDY_BIT)
def getMotion9(self):
# unknown
pass
def getMotion6(self):
pass
def getAcceleration(self):
pass
def getAccelerationX(self):
pass
def getAccelerationY(self):
pass
def getAccelerationZ(self):
pass
def getTemperature(self):
pass
def getRotation(self):
pass
def getRotationX(self):
pass
def getRotationY(self):
pass
def getRotationZ(self):
pass
def getExternalSensorByte(self, position):
return self.i2c.readU8(self.MPU6050_RA_EXT_SENS_DATA_00 + position)
def getExternalSensorWord(self, position):
pass
def getExternalSensorDWord(self, position):
pass
def getXNegMotionDetected(self):
return self.i2c.readBit(self.MPU6050_RA_MOT_DETECT_STATUS,
self.MPU6050_MOTION_MOT_XNEG_BIT)
def getXPosMotionDetected(self):
return self.i2c.readBit(self.MPU6050_RA_MOT_DETECT_STATUS,
self.MPU6050_MOTION_MOT_XPOS_BIT)
def getYNegMotionDetected(self):
return self.i2c.readBit(self.MPU6050_RA_MOT_DETECT_STATUS,
self.MPU6050_MOTION_MOT_YNEG_BIT)
def getYPosMotionDetected(self):
return self.i2c.readBit(self.MPU6050_RA_MOT_DETECT_STATUS,
self.MPU6050_MOTION_MOT_YPOS_BIT)
def getZNegMotionDetected(self):
return self.i2c.readBit(self.MPU6050_RA_MOT_DETECT_STATUS,
self.MPU6050_MOTION_MOT_ZNEG_BIT)
def getZPosMotionDetected(self):
return self.i2c.readBit(self.MPU6050_RA_MOT_DETECT_STATUS,
self.MPU6050_MOTION_MOT_ZPOS_BIT)
def getZeroMotionDetected(self):
return self.i2c.readBit(self.MPU6050_RA_MOT_DETECT_STATUS,
self.MPU6050_MOTION_MOT_ZRMOT_BIT)
def setSlaveOutputByte(self, num, data):
if num > 3:
return
self.i2c.write8(self.MPU6050_RA_I2C_SLV0_DO + num, data)
def getExternalShadowDelayEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_I2C_MST_DELAY_CTRL,
self.MPU6050_DELAYCTRL_DELAY_ES_SHADOW_BIT)
def setExternalShadowDelayEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_I2C_MST_DELAY_CTRL,
self.MPU6050_DELAYCTRL_DELAY_ES_SHADOW_BIT, enabled)
def getSlaveDelayEnabled(self, num):
# // MPU6050_DELAYCTRL_I2C_SLV4_DLY_EN_BIT is 4, SLV3 is 3, etc.
if num > 4:
return 0
return self.i2c.readBit(self.MPU6050_RA_I2C_MST_DELAY_CTRL, num)
def setSlaveDelayEnabled(self, num, enabled):
self.i2c.writeBit(self.MPU6050_RA_I2C_MST_DELAY_CTRL, num, enabled)
def resetGyroscopePath(self):
self.i2c.writeBit(self.MPU6050_RA_SIGNAL_PATH_RESET,
self.MPU6050_PATHRESET_GYRO_RESET_BIT, True)
def resetAccelerometerPath(self):
self.i2c.writeBit(self.MPU6050_RA_SIGNAL_PATH_RESET,
self.MPU6050_PATHRESET_ACCEL_RESET_BIT, True)
def resetTemperaturePath(self):
self.i2c.writeBit(self.MPU6050_RA_SIGNAL_PATH_RESET,
self.MPU6050_PATHRESET_TEMP_RESET_BIT, True)
def getAccelerometerPowerOnDelay(self):
return self.i2c.readBits(self.MPU6050_RA_MOT_DETECT_CTRL,
self.MPU6050_DETECT_ACCEL_ON_DELAY_BIT,
self.MPU6050_DETECT_ACCEL_ON_DELAY_LENGTH)
def setAccelerometerPowerOnDelay(self, delay):
self.i2c.writeBits(self.MPU6050_RA_MOT_DETECT_CTRL,
self.MPU6050_DETECT_ACCEL_ON_DELAY_BIT,
self.MPU6050_DETECT_ACCEL_ON_DELAY_LENGTH, delay)
def getFreefallDetectionCounterDecrement(self):
return self.i2c.readBits(self.MPU6050_RA_MOT_DETECT_CTRL,
self.MPU6050_DETECT_FF_COUNT_BIT,
self.MPU6050_DETECT_FF_COUNT_LENGTH)
def setFreefallDetectionCounterDecrement(self, decrement):
self.i2c.writeBits(self.MPU6050_RA_MOT_DETECT_CTRL,
self.MPU6050_DETECT_FF_COUNT_BIT,
self.MPU6050_DETECT_FF_COUNT_LENGTH, decrement)
def getMotionDetectionCounterDecrement(self):
return self.i2c.readBits(self.MPU6050_RA_MOT_DETECT_CTRL,
self.MPU6050_DETECT_MOT_COUNT_BIT,
self.MPU6050_DETECT_MOT_COUNT_LENGTH)
def setMotionDetectionCounterDecrement(self, decrement):
self.i2c.writeBits(self.MPU6050_RA_MOT_DETECT_CTRL,
self.MPU6050_DETECT_MOT_COUNT_BIT,
self.MPU6050_DETECT_MOT_COUNT_LENGTH, decrement)
def getFIFOEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_USER_CTRL,
self.MPU6050_USERCTRL_FIFO_EN_BIT)
def setFIFOEnabled(self, status):
self.i2c.writeBit(self.MPU6050_RA_USER_CTRL,
self.MPU6050_USERCTRL_FIFO_EN_BIT, status)
def getI2CMasterModeEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_USER_CTRL,
self.MPU6050_USERCTRL_I2C_MST_EN_BIT)
def setI2CMasterModeEnabled(self, status):
self.i2c.writeBit(self.MPU6050_RA_USER_CTRL,
self.MPU6050_USERCTRL_I2C_MST_EN_BIT, status)
def switchSPIEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_USER_CTRL,
self.MPU6050_USERCTRL_I2C_IF_DIS_BIT, enabled)
def resetFIFO(self):
self.i2c.writeBit(self.MPU6050_RA_USER_CTRL,
self.MPU6050_USERCTRL_FIFO_RESET_BIT, True)
def resetI2CMaster(self):
self.i2c.writeBit(self.MPU6050_RA_USER_CTRL,
self.MPU6050_USERCTRL_I2C_MST_RESET_BIT, True)
def resetSensors(self):
self.i2c.writeBit(self.MPU6050_RA_USER_CTRL,
self.MPU6050_USERCTRL_SIG_COND_RESET_BIT, True)
def reset(self):
self.i2c.writeBit(self.MPU6050_RA_PWR_MGMT_1,
self.MPU6050_PWR1_DEVICE_RESET_BIT, True)
def getSleepEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_PWR_MGMT_1,
self.MPU6050_PWR1_SLEEP_BIT)
def setSleepEnabled(self, status):
self.i2c.writeBit(self.MPU6050_RA_PWR_MGMT_1,
self.MPU6050_PWR1_SLEEP_BIT, status)
def getWakeCycleEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_PWR_MGMT_1,
self.MPU6050_PWR1_CYCLE_BIT)
def setWakeCycleEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_PWR_MGMT_1,
self.MPU6050_PWR1_CYCLE_BIT, enabled)
def getTempSensorEnabled(self):
result = self.i2c.readBit(self.MPU6050_RA_PWR_MGMT_1,
self.MPU6050_PWR1_TEMP_DIS_BIT)
return result == 0 # 1 is actually disabled here
def setTempSensorEnabled(self, enabled):
# 1 is actually disabled here
self.i2c.writeBit(self.MPU6050_RA_PWR_MGMT_1,
self.MPU6050_PWR1_TEMP_DIS_BIT, not enabled)
def getClockSource(self):
return self.i2c.readBits(self.MPU6050_RA_PWR_MGMT_1,
self.MPU6050_PWR1_CLKSEL_BIT,
self.MPU6050_PWR1_CLKSEL_LENGTH)
def setClockSource(self, source):
self.i2c.writeBits(self.MPU6050_RA_PWR_MGMT_1,
self.MPU6050_PWR1_CLKSEL_BIT,
self.MPU6050_PWR1_CLKSEL_LENGTH, source)
def getWakeFrequency(self):
return self.i2c.readBits(self.MPU6050_RA_PWR_MGMT_2,
self.MPU6050_PWR2_LP_WAKE_CTRL_BIT,
self.MPU6050_PWR2_LP_WAKE_CTRL_LENGTH)
def setWakeFrequency(self, frequency):
self.i2c.writeBits(self.MPU6050_RA_PWR_MGMT_2,
self.MPU6050_PWR2_LP_WAKE_CTRL_BIT,
self.MPU6050_PWR2_LP_WAKE_CTRL_LENGTH, frequency)
def getStandbyXAccelEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_PWR_MGMT_2,
self.MPU6050_PWR2_STBY_XA_BIT)
def setStandbyXAccelEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_PWR_MGMT_2,
self.MPU6050_PWR2_STBY_XA_BIT, enabled)
def getStandbyYAccelEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_PWR_MGMT_2,
self.MPU6050_PWR2_STBY_YA_BIT)
def setStandbyYAccelEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_PWR_MGMT_2,
self.MPU6050_PWR2_STBY_YA_BIT, enabled)
def getStandbyZAccelEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_PWR_MGMT_2,
self.MPU6050_PWR2_STBY_ZA_BIT)
def setStandbyZAccelEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_PWR_MGMT_2,
self.MPU6050_PWR2_STBY_ZA_BIT, enabled)
def getStandbyXGyroEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_PWR_MGMT_2,
self.MPU6050_PWR2_STBY_XG_BIT)
def setStandbyXGyroEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_PWR_MGMT_2,
self.MPU6050_PWR2_STBY_XG_BIT, enabled)
def getStandbyYGyroEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_PWR_MGMT_2,
self.MPU6050_PWR2_STBY_YG_BIT)
def setStandbyYGyroEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_PWR_MGMT_2,
self.MPU6050_PWR2_STBY_YG_BIT, enabled)
def getStandbyZGyroEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_PWR_MGMT_2,
self.MPU6050_PWR2_STBY_ZG_BIT)
def setStandbyZGyroEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_PWR_MGMT_2,
self.MPU6050_PWR2_STBY_ZG_BIT, enabled)
def getFIFOCount(self):
return self.i2c.readU16(self.MPU6050_RA_FIFO_COUNTH)
def getFIFOByte(self):
return self.i2c.readU8(self.MPU6050_RA_FIFO_R_W)
def getFIFOBytes(self, length):
return self.i2c.readBytes(self.MPU6050_RA_FIFO_R_W, length)
def setFIFOByte(self, data):
self.i2c.write8(self.MPU6050_RA_FIFO_R_W, data)
def getDeviceID(self):
return self.i2c.readBits(self.MPU6050_RA_WHO_AM_I,
self.MPU6050_WHO_AM_I_BIT,
self.MPU6050_WHO_AM_I_LENGTH)
def setDeviceID(self, id):
self.i2c.writeBits(self.MPU6050_RA_WHO_AM_I, self.MPU6050_WHO_AM_I_BIT,
self.MPU6050_WHO_AM_I_LENGTH, id)
def getOTPBankValid(self):
result = self.i2c.readBit(self.MPU6050_RA_XG_OFFS_TC,
self.MPU6050_TC_OTP_BNK_VLD_BIT)
return result
def setOTPBankValid(self, status):
self.i2c.writeBit(self.MPU6050_RA_XG_OFFS_TC,
self.MPU6050_TC_OTP_BNK_VLD_BIT, status)
def getXGyroOffset(self):
return self.i2c.readBits(self.MPU6050_RA_XG_OFFS_TC,
self.MPU6050_TC_OFFSET_BIT,
self.MPU6050_TC_OFFSET_LENGTH)
def setXGyroOffset(self, offset):
self.i2c.writeBits(self.MPU6050_RA_XG_OFFS_TC,
self.MPU6050_TC_OFFSET_BIT,
self.MPU6050_TC_OFFSET_LENGTH, offset)
def getYGyroOffset(self):
return self.i2c.readBits(self.MPU6050_RA_YG_OFFS_TC,
self.MPU6050_TC_OFFSET_BIT,
self.MPU6050_TC_OFFSET_LENGTH)
def setYGyroOffset(self, offset):
self.i2c.writeBits(self.MPU6050_RA_YG_OFFS_TC,
self.MPU6050_TC_OFFSET_BIT,
self.MPU6050_TC_OFFSET_LENGTH, offset)
def getZGyroOffset(self):
return self.i2c.readBits(self.MPU6050_RA_ZG_OFFS_TC,
self.MPU6050_TC_OFFSET_BIT,
self.MPU6050_TC_OFFSET_LENGTH)
def setZGyroOffset(self, offset):
self.i2c.writeBits(self.MPU6050_RA_ZG_OFFS_TC,
self.MPU6050_TC_OFFSET_BIT,
self.MPU6050_TC_OFFSET_LENGTH, offset)
def getXFineGain(self):
return self.i2c.readU8(self.MPU6050_RA_X_FINE_GAIN)
def setXFineGain(self, gain):
self.i2c.write8(self.MPU6050_RA_X_FINE_GAIN, gain)
def getYFineGain(self):
return self.i2c.readU8(self.MPU6050_RA_Y_FINE_GAIN)
def setYFineGain(self, gain):
self.i2c.write8(self.MPU6050_RA_Y_FINE_GAIN, gain)
def getZFineGain(self):
return self.i2c.readU8(self.MPU6050_RA_Z_FINE_GAIN)
def setZFineGain(self, gain):
self.i2c.write8(self.MPU6050_RA_Z_FINE_GAIN, gain)
def getXAccelOffset(self):
pass
def setXAccelOffset(self, offset):
pass
def getYAccelOffset(self):
pass
def setYAccelOffset(self, offset):
pass
def getZAccelOffset(self):
pass
def setZAccelOffset(self, offset):
pass
def getXGyroOffsetUser(self):
pass
def setXGyroOffsetUser(self, value):
self.i2c.write8(self.MPU6050_RA_XG_OFFS_USRH, value >> 8)
self.i2c.write8(self.MPU6050_RA_XG_OFFS_USRL, value & 0xFF)
return True
def getYGyroOffsetUser(self):
pass
def setYGyroOffsetUser(self, value):
self.i2c.write8(self.MPU6050_RA_YG_OFFS_USRH, value >> 8)
self.i2c.write8(self.MPU6050_RA_YG_OFFS_USRL, value & 0xFF)
return True
def getZGyroOffsetUser(self):
pass
def setZGyroOffsetUser(self, value):
self.i2c.write8(self.MPU6050_RA_ZG_OFFS_USRH, value >> 8)
self.i2c.write8(self.MPU6050_RA_ZG_OFFS_USRL, value & 0xFF)
return True
def getIntPLLReadyEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_INT_ENABLE,
self.MPU6050_INTERRUPT_PLL_RDY_INT_BIT)
def setIntPLLReadyEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_INT_ENABLE,
self.MPU6050_INTERRUPT_PLL_RDY_INT_BIT, enabled)
def getIntDMPEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_INT_ENABLE,
self.MPU6050_INTERRUPT_DMP_INT_BIT)
def setIntDMPEnabled(self, enabled):
self.i2c.writeBit(self.MPU6050_RA_INT_ENABLE,
self.MPU6050_INTERRUPT_DMP_INT_BIT, enabled)
def getDMPInt5Status(self):
return self.i2c.readBit(self.MPU6050_RA_DMP_INT_STATUS,
self.MPU6050_DMPINT_5_BIT)
def getDMPInt4Status(self):
return self.i2c.readBit(self.MPU6050_RA_DMP_INT_STATUS,
self.MPU6050_DMPINT_4_BIT)
def getDMPInt3Status(self):
return self.i2c.readBit(self.MPU6050_RA_DMP_INT_STATUS,
self.MPU6050_DMPINT_3_BIT)
def getDMPInt2Status(self):
return self.i2c.readBit(self.MPU6050_RA_DMP_INT_STATUS,
self.MPU6050_DMPINT_2_BIT)
def getDMPInt1Status(self):
return self.i2c.readBit(self.MPU6050_RA_DMP_INT_STATUS,
self.MPU6050_DMPINT_1_BIT)
def getDMPInt0Status(self):
return self.i2c.readBit(self.MPU6050_RA_DMP_INT_STATUS,
self.MPU6050_DMPINT_0_BIT)
def getIntPLLReadyStatus(self):
return self.i2c.readBit(self.MPU6050_RA_INT_STATUS,
self.MPU6050_INTERRUPT_PLL_RDY_INT_BIT)
def getIntDMPStatus(self):
return self.i2c.readBit(self.MPU6050_RA_INT_STATUS,
self.MPU6050_INTERRUPT_DMP_INT_BIT)
def getDMPEnabled(self):
return self.i2c.readBit(self.MPU6050_RA_USER_CTRL,
self.MPU6050_USERCTRL_DMP_EN_BIT)
def setDMPEnabled(self, status):
self.i2c.writeBit(self.MPU6050_RA_USER_CTRL,
self.MPU6050_USERCTRL_DMP_EN_BIT, status)
def resetDMP(self):
self.i2c.writeBit(self.MPU6050_RA_USER_CTRL,
self.MPU6050_USERCTRL_DMP_RESET_BIT, True)
def setMemoryBank(self, bank, prefetchEnabled=False, userBank=False):
bank &= 0x1F
if userBank:
bank |= 0x20
if prefetchEnabled:
bank |= 0x40
self.i2c.write8(self.MPU6050_RA_BANK_SEL, bank)
return True
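# Worked example (illustrative): dmpInitialize() below calls
# setMemoryBank(0x10, True, True), which packs to
# (0x10 & 0x1F) | 0x20 | 0x40 = 0x70 before it is written to
# MPU6050_RA_BANK_SEL.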
def setMemoryStartAddress(self, address):
self.i2c.write8(self.MPU6050_RA_MEM_START_ADDR, address)
def readMemoryByte(self):
result = self.i2c.readU8(self.MPU6050_RA_MEM_R_W)
return result
def writeMemoryByte(self, data):
self.i2c.write8(self.MPU6050_RA_MEM_R_W, data)
def readMemoryBlock(self):
pass
def writeMemoryBlock(self, data, dataSize, bank=0, address=0,
verify=False):
self.setMemoryBank(bank)
self.setMemoryStartAddress(address)
i = 0
while i < dataSize:
self.i2c.write8(self.MPU6050_RA_MEM_R_W, data[i])
# Verify
if verify:
self.setMemoryBank(bank)
self.setMemoryStartAddress(address)
result = self.i2c.readU8(self.MPU6050_RA_MEM_R_W)
if result != data[i]:
print(data[i], result, address)
# reset address to 0 after reaching 255
if address == 255:
address = 0
bank += 1
self.setMemoryBank(bank)
else:
address += 1
self.setMemoryStartAddress(address)
# increase byte index
i += 1
def writeDMPConfigurationSet(self, data, dataSize, bank=0, address=0,
verify=False):
# config set data is a long string of blocks with the following
# structure:
# [bank] [offset] [length] [byte[0], byte[1], ..., byte[length]]
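# Worked example (illustrative), using the first dmpConfig record above:
# 0x03, 0x7B, 0x03, 0x4C, 0xCD, 0x6C means "write the 3 bytes
# 0x4C 0xCD 0x6C to bank 3, offset 0x7B". A record whose length byte is
# 0x00 is treated as a special instruction instead (see below), e.g.
# 0x00, 0x00, 0x00, 0x01 enables the DMP-related interrupts.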
pos = 0
while pos < dataSize:
j = 0
dmpConfSet = []
while ((j < 4) or (j < dmpConfSet[2] + 3)):
dmpConfSet.append(data[pos])
j += 1
pos += 1
# write data or perform special action
if dmpConfSet[2] > 0:
# regular block of data to write
self.writeMemoryBlock(dmpConfSet[3:], dmpConfSet[2],
dmpConfSet[0], dmpConfSet[1], verify)
else:
# special instruction
# NOTE: this kind of behavior (what and when to do certain things)
# is totally undocumented. This code is in here based on observed
# behavior only, and exactly why (or even whether) it has to be here
# is anybody's guess for now.
if dmpConfSet[3] == 0x01:
# enable DMP-related interrupts
#setIntZeroMotionEnabled(true);
#setIntFIFOBufferOverflowEnabled(true);
#setIntDMPEnabled(true);
self.i2c.write8(self.MPU6050_RA_INT_ENABLE,
0x32)  # single operation
def getDMPConfig1(self):
return self.i2c.readU8(self.MPU6050_RA_DMP_CFG_1)
def setDMPConfig1(self, config):
self.i2c.write8(self.MPU6050_RA_DMP_CFG_1, config)
def getDMPConfig2(self):
return self.i2c.readU8(self.MPU6050_RA_DMP_CFG_2)
def setDMPConfig2(self, config):
self.i2c.write8(self.MPU6050_RA_DMP_CFG_2, config)
def dmpPacketAvailable(self):
return self.getFIFOCount() >= self.dmpGetFIFOPacketSize()
def dmpGetFIFOPacketSize(self):
return self.dmpPacketSize
def dmpGetAccel(self, packet):
# | [ACC X ][ ][ACC Y ][ ][ACC Z ][ ][ ] |
# | 28 29 30 31 32 33 34 35 36 37 38 39 40 41 |
return {
'x': ((u_to_s(packet[28]) << 8) + packet[29]),
'y': ((u_to_s(packet[32]) << 8) + packet[33]),
'z': ((u_to_s(packet[36]) << 8) + packet[37]),
}
def dmpGetQuaternion(self, packet):
return {
'w': ((u_to_s(packet[0]) << 8) + packet[1]) / 16384.0,
'x': ((u_to_s(packet[4]) << 8) + packet[5]) / 16384.0,
'y': ((u_to_s(packet[8]) << 8) + packet[9]) / 16384.0,
'z': ((u_to_s(packet[12]) << 8) + packet[13]) / 16384.0,
}
def dmpGetGyro(self):
pass
def dmpGetLinearAccel(self, a, g):
"""
Use dmpGetAccel output as a and dmpGetGravity output as g.
"""
return {
'x': a['x'] - g['x'] * 8192,
'y': a['y'] - g['y'] * 8192,
'z': a['z'] - g['z'] * 8192,
}
def dmpGetLinearAccelInWorld(self, a, q):
v = qv_mult((q['w'], q['x'], q['y'], q['z'], ),
(a['x'], a['y'], a['z'], ))
return {
'x': v[0],
'y': v[1],
'z': v[2],
}
def dmpGetGravity(self, q):
data = {
'x': float(2 * (q['x'] * q['z'] - q['w'] * q['y'])),
'y': float(2 * (q['w'] * q['x'] + q['y'] * q['z'])),
'z': float(
q['w'] * q['w'] - q['x'] * q['x'] - q['y'] * q['y'] + q['z'] *
q['z'])}
return data
def dmpGetEuler(self, q):
pass
def dmpGetYawPitchRoll(self, q, g):
data = {
# yaw: (about Z axis)
'yaw': atan2(2 * q['x'] * q['y'] - 2 * q['w'] * q['z'],
2 * q['w'] * q['w'] + 2 * q['x'] * q['x'] - 1),
# pitch: (nose up/down, about Y axis)
'pitch': atan(g['x'] / sqrt(g['y'] * g['y'] + g['z'] * g['z'])),
# roll: (tilt left/right, about X axis)
'roll': atan(g['y'] / sqrt(g['x'] * g['x'] + g['z'] * g['z']))}
return data
def dmpProcessFIFOPacket(self):
pass
def dmpReadAndProcessFIFOPacket(self):
pass
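# Illustrative sketch (added, not part of the original driver): how the DMP
# helpers above are typically chained once dmpInitialize() has run and the
# FIFO/DMP are enabled. `mpu` is an assumed MPU6050 instance; all method
# names used here exist in this class.
#
#   if mpu.dmpPacketAvailable():
#       packet = mpu.getFIFOBytes(mpu.dmpGetFIFOPacketSize())
#       q = mpu.dmpGetQuaternion(packet)
#       g = mpu.dmpGetGravity(q)
#       ypr = mpu.dmpGetYawPitchRoll(q, g)  # angles in radians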
def dmpInitialize(self):
# Resetting MPU6050
self.reset()
sleep(0.05) # wait after reset
# Disable sleep mode
self.setSleepEnabled(False)
# get MPU hardware revision
self.setMemoryBank(0x10, True, True) # Selecting user bank 16
self.setMemoryStartAddress(0x06) # Selecting memory byte 6
hwRevision = self.readMemoryByte() # Checking hardware revision
#print('Revision @ user[16][6] ='),
#print(hex(hwRevision))
self.setMemoryBank(0, False,
False) # Resetting memory bank selection to 0
# get X/Y/Z gyro offsets
xgOffset = self.getXGyroOffset()
ygOffset = self.getYGyroOffset()
zgOffset = self.getZGyroOffset()
# Enable pass through mode
self.setI2CBypassEnabled(True)
# load DMP code into memory banks
self.writeMemoryBlock(self.dmpMemory, self.MPU6050_DMP_CODE_SIZE, 0, 0,
False)
#print('Success! DMP code written and verified')
# write DMP configuration
self.writeDMPConfigurationSet(self.dmpConfig,
self.MPU6050_DMP_CONFIG_SIZE, 0, 0,
False)
#print('Success! DMP configuration written and verified')
# Setting clock source to Z Gyro
self.setClockSource(self.MPU6050_CLOCK_PLL_ZGYRO)
# Setting DMP and FIFO_OFLOW interrupts enabled
self.setIntEnabled(0x12)
# Setting sample rate to 200Hz
self.setRate(4) # 1khz / (1 + 4) = 200 Hz [9 = 100 Hz]
# Setting external frame sync to TEMP_OUT_L[0]
self.setExternalFrameSync(self.MPU6050_EXT_SYNC_TEMP_OUT_L)
# Setting DLPF bandwidth to 42Hz
self.setDLPFMode(self.MPU6050_DLPF_BW_42)
# Setting gyro sensitivity to +/- 2000 deg/sec
self.setFullScaleGyroRange(self.MPU6050_GYRO_FS_2000)
# Setting DMP configuration bytes (function unknown)
self.setDMPConfig1(0x03)
self.setDMPConfig2(0x00)
# Clearing OTP Bank flag
self.setOTPBankValid(False)
# Setting X/Y/Z gyro offsets to previous values
self.setXGyroOffset(xgOffset)
self.setYGyroOffset(ygOffset)
self.setZGyroOffset(zgOffset)
# Setting X/Y/Z gyro user offsets to zero
#self.setXGyroOffsetUser(0)
#self.setYGyroOffsetUser(0)
#self.setZGyroOffsetUser(0)
# Writing final memory update 1/7 (function unknown)
pos = 0
j = 0
dmpUpdate = []
while (j < 4) or (j < dmpUpdate[2] + 3):
dmpUpdate.append(self.dmpUpdates[pos])
j += 1
pos += 1
self.writeMemoryBlock(dmpUpdate[3:], dmpUpdate[2], dmpUpdate[0],
dmpUpdate[1], True)
# Writing final memory update 2/7 (function unknown)
j = 0
dmpUpdate = []
while (j < 4) or (j < dmpUpdate[2] + 3):
dmpUpdate.append(self.dmpUpdates[pos])
j += 1
pos += 1
self.writeMemoryBlock(dmpUpdate[3:], dmpUpdate[2], dmpUpdate[0],
dmpUpdate[1], True)
# Resetting FIFO
self.resetFIFO()
# Reading FIFO count
fifoCount = self.getFIFOCount()
#print('Current FIFO count = %s' % fifoCount)
# Setting motion detection threshold to 2
self.setMotionDetectionThreshold(2)
# Setting zero-motion detection threshold to 156
self.setZeroMotionDetectionThreshold(156)
# Setting motion detection duration to 80
self.setMotionDetectionDuration(80)
# Setting zero-motion detection duration to 0
self.setZeroMotionDetectionDuration(0)
# Resetting FIFO
self.resetFIFO()
# Enabling FIFO
self.setFIFOEnabled(True)
# Enabling DMP
self.setDMPEnabled(True)
# Resetting DMP
self.resetDMP()
# Writing final memory update 3/7 (function unknown)
j = 0
dmpUpdate = []
while ((j < 4) or (j < dmpUpdate[2] + 3)):
dmpUpdate.append(self.dmpUpdates[pos])
j += 1
pos += 1
self.writeMemoryBlock(dmpUpdate[3:], dmpUpdate[2], dmpUpdate[0],
dmpUpdate[1], True)
# Writing final memory update 4/7 (function unknown)
j = 0
dmpUpdate = []
while ((j < 4) or (j < dmpUpdate[2] + 3)):
dmpUpdate.append(self.dmpUpdates[pos])
j += 1
pos += 1
self.writeMemoryBlock(dmpUpdate[3:], dmpUpdate[2], dmpUpdate[0],
dmpUpdate[1], True)
# Writing final memory update 5/7 (function unknown)
j = 0
dmpUpdate = []
while ((j < 4) or (j < dmpUpdate[2] + 3)):
dmpUpdate.append(self.dmpUpdates[pos])
j += 1
pos += 1
self.writeMemoryBlock(dmpUpdate[3:], dmpUpdate[2], dmpUpdate[0],
dmpUpdate[1], True)
# Waiting for FIFO count > 2
while (self.getFIFOCount() < 3):
| fifoCount = self.getFIFOCount() | 6,204 | lcc_e | python | null | a4bf3f805b8466b1223216081b5cf014eeddec9df3c3867e |
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import itertools
import json
import erpnext
import frappe
import copy
from frappe.utils.pdf import get_pdf
import pdfkit
import os
from erpnext.controllers.item_variant import (ItemVariantExistsError,
copy_attributes_to_variant, get_variant, make_variant_item_code, validate_item_variant_attributes)
from erpnext.setup.doctype.item_group.item_group import (get_parent_item_groups, invalidate_cache_for)
from frappe import _, msgprint
from frappe.utils import (nowdate, cint, cstr, flt, formatdate, get_timestamp, getdate,
now_datetime, random_string, strip)
from frappe.utils.html_utils import clean_html
from frappe.website.doctype.website_slideshow.website_slideshow import \
get_slideshow
from frappe.website.render import clear_cache
from frappe.website.website_generator import WebsiteGenerator
from frappe.model.naming import make_autoname
from six import iteritems
from erpnext.stock.stock_balance import update_bin_qty, get_reserved_qty
class DuplicateReorderRows(frappe.ValidationError):
pass
class StockExistsForTemplate(frappe.ValidationError):
pass
class InvalidBarcode(frappe.ValidationError):
pass
class Item(WebsiteGenerator):
website = frappe._dict(
page_title_field="item_name",
condition_field="show_in_website",
template="templates/generators/item.html",
no_cache=1
)
def onload(self):
super(Item, self).onload()
self.set_onload('stock_exists', self.stock_ledger_created())
self.set_asset_naming_series()
def set_asset_naming_series(self):
if not hasattr(self, '_asset_naming_series'):
from erpnext.assets.doctype.asset.asset import get_asset_naming_series
self._asset_naming_series = get_asset_naming_series()
self.set_onload('asset_naming_series', self._asset_naming_series)
def autoname(self):
if frappe.db.get_default("item_naming_by") == "Naming Series":
if self.variant_of:
if not self.item_code:
template_item_name = frappe.db.get_value("Item", self.variant_of, "item_name")
self.item_code = make_variant_item_code(self.variant_of, template_item_name, self)
else:
from frappe.model.naming import set_name_by_naming_series
set_name_by_naming_series(self)
self.item_code = self.name
elif not self.item_code or self.generer_code_interne or self.item_code == "CODE" or self.item_code == "code":
group = frappe.get_doc("Item Group",self.item_group)
group_numero = group.numero
self.item_name = group.name
if group_numero:
if self.variant_of:
fabricant = frappe.get_doc('Manufacturer',self.manufacturer)
self.item_code = make_autoname(self.variant_of+"-"+fabricant.code+".##")
else:
if(len(group_numero) < 6):
group_numero = group_numero.ljust(6,'0')
self.item_code = make_autoname(group_numero + "-" + ".####")
else:
msgprint(_("Impossible de generer le code. Groupe article n'est pas numerote."), raise_exception=1)
self.nom_generique_long = self.item_name
if self.designation_commerciale:
self.nom_generique_long += ' '+self.designation_commerciale
self.item_code = strip(self.item_code)
self.name = self.item_code
def before_insert(self):
if not self.description:
self.description = self.titre_article
self.ref_fabricant = self.manufacturer_part_no
# if self.is_sales_item and not self.get('is_item_from_hub'):
# self.publish_in_hub = 1
def after_insert(self):
'''set opening stock and item price'''
if self.standard_rate:
for default in self.item_defaults:
self.add_price(default.default_price_list)
if self.opening_stock:
self.set_opening_stock()
def set_prices(self):
if self.has_variants:
price_list = frappe.get_all("Item Price",fields=["name","price_list","price_list_rate","currency","selling","buying","manufacturer","manufacturer_part_no"],filters={"item_model":self.name})
if price_list:
self.prices = ""
self.selling = ""
for price in price_list:
text = "%s %s : %s : %.2f %s" % (price.manufacturer,price.manufacturer_part_no,price.price_list,price.price_list_rate,price.currency)
if price.buying == 1:
self.prices += text+ " / \n"
if price.selling == 1:
self.selling += text +" / \n"
#self.prices += "/ \n"
def validate(self):
if self.versions and self.generation_vehicule_supporte:
frappe.msgprint("Attention vous avez mis des valeurs dans table Version vehicule et Generation vehicule au meme temps!")
if self.versions and self.modele_vehicule_supporte:
frappe.msgprint("Attention vous avez mis des valeurs dans table Version vehicule et Modeles vehicule au meme temps!")
if self.versions and self.marque_vehicule_supporte:
frappe.msgprint("Attention vous avez mis des valeurs dans table Version vehicule et marque vehicule au meme temps!")
if self.generation_vehicule_supporte and self.modele_vehicule_supporte:
frappe.msgprint("Attention vous avez mis des valeurs dans table Generation vehicule et Modeles vehicule au meme temps!")
cr = []
#if self.has_variants:
for critere in self.critere_piece:
if critere.important:
cr.append("{0}: {1}".format(critere.parametre, (critere.valeur_p or '') +' '+ (critere.valeur or '')))
for vcritere in self.criteres_piece_variante:
if vcritere.important:
cr.append("{0}: {1}".format(vcritere.parametre, (vcritere.valeur_p or '') +' '+ (vcritere.valeur or '')))
if cr:
self.critere_text = ' / '.join(str(x) for x in cr)
#critere_text
self.oem_text = ""
for o in self.oem:
if o.oem:
o.oem_simplifie = ''.join(e for e in o.oem if e.isalnum()).replace(" ","").replace("-","").replace(".","").replace("/","").replace("_","").replace(":","")
if self.oem:
self.oem_text = ' - '.join(str(x.oem_simplifie or x.oem) for x in self.oem)
#for moem in self.oem:
# self.oem_text += "%s - " % moem.oem
self.get_doc_before_save()
if self.manufacturer_part_no:
self.ref_fabricant = self.manufacturer_part_no
if self.manufacturer:
logo = frappe.get_doc("Manufacturer",self.manufacturer)
self.fabricant_logo = logo.logo
self.titre_article = self.nom_groupe+' : '+self.manufacturer_part_no+' '+logo.full_name
else:
self.titre_article = self.item_name
super(Item, self).validate()
if self.has_variants == 0 and self.variant_of and self.variant_based_on == 'Manufacturer' and not self.manufacturer_part_no:
frappe.throw(_("Numero piece fabricant n'est pas valide"))
if not self.item_name:
self.item_name = self.item_code
if not self.description:
self.description = self.titre_article
self.validate_uom()
self.validate_description()
self.add_default_uom_in_conversion_factor_table()
self.validate_conversion_factor()
self.validate_item_type()
self.check_for_active_boms()
self.fill_customer_code()
self.check_item_tax()
self.validate_barcode()
self.validate_warehouse_for_reorder()
self.update_bom_item_desc()
self.synced_with_hub = 0
self.validate_has_variants()
self.validate_stock_exists_for_template_item()
self.validate_attributes()
self.validate_variant_attributes()
self.validate_variant_based_on_change()
self.validate_website_image()
self.make_thumbnail()
self.validate_fixed_asset()
self.validate_retain_sample()
self.validate_uom_conversion_factor()
self.validate_item_defaults()
self.update_defaults_from_item_group()
self.validate_stock_for_has_batch_and_has_serial()
if self.has_variants:
count = frappe.db.sql("""select count(name) as cnt from `tabItem` where variant_of='%s'""" % (self.name),as_dict=1)
if count:
self.nbr_variante = count[0]['cnt'] or 0
else:
count = frappe.db.sql("""select count(name) as cnt from `tabItem` where variant_of='%s'""" % (self.variant_of),as_dict=1)
if count:
self.nbr_variante = count[0]['cnt'] or 0
# self.nbr_var = ''
# vars = frappe.db.sql(''' select count(name) from `tabItem` where variant_of=%s ''',self.name)
# if vars:
# self.nbr_var = vars[0] or 0
# set table reorder
min_qts = self.recom_minimum
qts = self.recom_qts
if self.manufacturer_part_no:
self.clean_manufacturer_part_number = self.manufacturer_part_no.replace(" ","").replace("-","").replace("_","").replace("/","").replace(".","")
if min_qts == -1 and qts == -1:
self.reorder_levels = []
self.recom_minimum = 0
self.recom_qts = 0
if min_qts > 0 :
if not qts or qts == 0:
qts = 1
levels = frappe.get_all("Item Reorder",fields=["warehouse_group","name","parent","warehouse"],filters=[{"parent":self.name},{"warehouse":"GLOBAL - MV"}])
original = list(filter(lambda x: x.warehouse != "GLOBAL - MV",self.reorder_levels))
self.reorder_levels = []
row = self.append('reorder_levels',{})
row.warehouse='GLOBAL - MV'
row.warehouse_group='GLOBAL - MV'
row.warehouse_reorder_level=min_qts
row.warehouse_reorder_qty=qts
row.material_request_type='Purchase'
self.reorder_levels.extend(original)
self.recom_minimum = 0
self.recom_qts = 0
#elif levels:
#level = frappe.get_doc("Item Reorder",levels[0].name)
#level.warehouse_reorder_level=min_qts
#level.warehouse_reorder_qty=qts
#level.save()
#original = list(filter(lambda(x: x.warehouse != "GLOBAL - MV",self.reorder_levels))
nom_g = ''
if self.variant_of and self.manufacturer_part_no and self.manufacturer:
nom_g += (self.manufacturer or '') + ' ' + (self.manufacturer_part_no or '') + ' ' + (self.item_name or '') + ' '
if self.has_variants:
nom_g += (self.item_name or '') + ' '
if self.oem_text:
nom_g += (self.oem_text or '') + ' '
if self.critere_text:
nom_g += (self.critere_text or '') + ' '
if self.composant_text:
nom_g += 'Composant : ' + (self.composant_text or '')+ ' '
if self.articles_text:
nom_g += 'Complements : ' + (self.articles_text or '')+ ' '
if self.clean_manufacturer_part_number:
nom_g += (self.clean_manufacturer_part_number or '') + ' '
for v in self.versions:
nom_g += (v.marque_vehicule or '')+' '+(v.modele_vehicule or '')+' '+(v.nom_version or '')+' - '
for g in self.generation_vehicule_supporte:
nom_g += (g.nom_marque or '')+' '+(g.nom_generation or '')+' - '
for g in self.modele_vehicule_supporte:
nom_g += (g.nom_marque or '')+' '+(g.nom_modele or '')+' - '
for g in self.marque_vehicule_supporte:
nom_g += (g.marque or '')+' '
self.nom_generique_long = (nom_g or '').lower()
if not self.get("__islocal"):
self.old_item_group = frappe.db.get_value(self.doctype, self.name, "item_group")
self.old_website_item_groups = frappe.db.sql_list("""select item_group
from `tabWebsite Item Group`
where parentfield='website_item_groups' and parenttype='Item' and parent=%s""", self.name)
# update qts
self.set_qts()
def set_qts(self,save=False):
if not self.has_variants:
self.qts_total = 0
self.qts_depot =0
stotal = frappe.db.sql("""select sum(actual_qty) from tabBin where item_code=%s""",[self.item_code])
#frappe.msgprint("stotal: %s" % stotal)
if stotal:
self.qts_total = stotal[0][0]
depot_parent = frappe.db.get_value('Stock Settings', None, 'depot_parent')
if depot_parent:
warehouses= frappe.db.sql("""select name from `tabWarehouse` where parent_warehouse=%s""",(depot_parent),as_dict=True)
if warehouses:
qtotal = frappe.db.sql("""select sum(actual_qty) from tabBin where item_code='%s' and warehouse in (%s)""" % (self.item_code,', '.join(['%s']*len(warehouses))),tuple([w.name for w in warehouses]))
#frappe.msgprint("%s" % warehouses)
if qtotal:
self.qts_depot = qtotal[0][0]
if save:
#self.save()
frappe.db.set_value("Item", self.name, "qts_total", self.qts_total)
frappe.db.set_value("Item", self.name, "qts_depot", self.qts_depot)
frappe.db.commit()
def sync_comp(self):
if self.variant_of:
self.composant_text = ""
#_variantes = frappe.db.sql(""" select name,manufacturer_part_no,manufacturer from `tabItem` where variant_of= '{}'""".format(self.name),as_dict=True)
for cmp in self.composant:
if cmp.manufacturer_part_no:
self.composant_text += "%s (%s) /" % ((cmp.manufacturer_part_no or ''),cmp.item_group )
elif cmp.item:
var_comp = frappe.db.sql(""" select name,item_group,manufacturer_part_no,manufacturer from `tabItem` where variant_of= '{}' and manufacturer='{}' limit 1""".format(cmp.item,self.manufacturer),as_dict=True)
if var_comp:
_comp=var_comp[0]
self.composant_text += "%s (%s) /" % ((_comp.manufacturer_part_no or ''),_comp.item_group)
self.articles_text= ""
for art in self.articles:
if art.manufacturer_part_no:
self.articles_text += "%s (%s) /" % ((art.manufacturer_part_no or ''),art.item_group )
elif art.item:
var_comp = frappe.db.sql(""" select name,item_group,manufacturer_part_no,manufacturer from `tabItem` where variant_of= '{}' and manufacturer='{}' limit 1""".format(art.item,self.manufacturer),as_dict=True)
if var_comp:
_comp=var_comp[0]
self.articles_text += "%s (%s) /" % ((_comp.manufacturer_part_no or '') ,_comp.item_group )
def on_update(self):
invalidate_cache_for_item(self)
self.validate_name_with_item_group()
self.update_variants()
self.update_item_price()
self.update_template_item()
self.sync_comp()
def validate_description(self):
'''Clean HTML description if set'''
if cint(frappe.db.get_single_value('Stock Settings', 'clean_description_html')):
self.description = clean_html(self.description)
def add_price(self, price_list=None):
'''Add a new price'''
if not price_list:
price_list = (frappe.db.get_single_value('Selling Settings', 'selling_price_list')
or frappe.db.get_value('Price List', _('Standard Selling')))
if price_list:
item_price = frappe.get_doc({
"doctype": "Item Price",
"price_list": price_list,
"item_code": self.name,
"currency": erpnext.get_default_currency(),
"price_list_rate": self.standard_rate
})
item_price.insert()
def set_opening_stock(self):
'''set opening stock'''
if not self.is_stock_item or self.has_serial_no or self.has_batch_no:
return
if not self.valuation_rate and self.standard_rate:
self.valuation_rate = self.standard_rate
if not self.valuation_rate:
frappe.throw(_("Valuation Rate is mandatory if Opening Stock entered"))
from erpnext.stock.doctype.stock_entry.stock_entry_utils import make_stock_entry
# default warehouse, or Stores
for default in self.item_defaults:
default_warehouse = (default.default_warehouse
or frappe.db.get_single_value('Stock Settings', 'default_warehouse')
or frappe.db.get_value('Warehouse', {'warehouse_name': _('Stores')}))
if default_warehouse:
stock_entry = make_stock_entry(item_code=self.name, target=default_warehouse, qty=self.opening_stock,
rate=self.valuation_rate, company=default.company)
stock_entry.add_comment("Comment", _("Opening Stock"))
def make_route(self):
if not self.route:
return cstr(frappe.db.get_value('Item Group', self.item_group,
'route')) + '/' + self.scrub((self.item_name if self.item_name else self.item_code) + '-' + random_string(5))
def validate_website_image(self):
"""Validate if the website image is a public file"""
auto_set_website_image = False
if not self.website_image and self.image:
auto_set_website_image = True
self.website_image = self.image
if not self.website_image:
return
# find if website image url exists as public
file_doc = frappe.get_all("File", filters={
"file_url": self.website_image
}, fields=["name", "is_private"], order_by="is_private asc", limit_page_length=1)
if file_doc:
file_doc = file_doc[0]
if not file_doc:
if not auto_set_website_image:
frappe.msgprint(_("Website Image {0} attached to Item {1} cannot be found")
.format(self.website_image, self.name))
self.website_image = None
elif file_doc.is_private:
if not auto_set_website_image:
frappe.msgprint(_("Website Image should be a public file or website URL"))
self.website_image = None
def make_thumbnail(self):
"""Make a thumbnail of `website_image`"""
import requests.exceptions
if not self.is_new() and self.website_image != frappe.db.get_value(self.doctype, self.name, "website_image"):
self.thumbnail = None
if self.website_image and not self.thumbnail:
file_doc = None
try:
file_doc = frappe.get_doc("File", {
"file_url": self.website_image,
"attached_to_doctype": "Item",
"attached_to_name": self.name
})
except frappe.DoesNotExistError:
pass
# cleanup
frappe.local.message_log.pop()
except requests.exceptions.HTTPError:
frappe.msgprint(_("Warning: Invalid attachment {0}").format(self.website_image))
self.website_image = None
except requests.exceptions.SSLError:
frappe.msgprint(
_("Warning: Invalid SSL certificate on attachment {0}").format(self.website_image))
self.website_image = None
# for CSV import
if self.website_image and not file_doc:
try:
file_doc = frappe.get_doc({
"doctype": "File",
"file_url": self.website_image,
"attached_to_doctype": "Item",
"attached_to_name": self.name
}).insert()
except IOError:
self.website_image = None
if file_doc:
if not file_doc.thumbnail_url:
file_doc.make_thumbnail()
self.thumbnail = file_doc.thumbnail_url
def validate_fixed_asset(self):
if self.is_fixed_asset:
if self.is_stock_item:
frappe.throw(_("Fixed Asset Item must be a non-stock item."))
if not self.asset_category:
frappe.throw(_("Asset Category is mandatory for Fixed Asset item"))
if self.stock_ledger_created():
frappe.throw(_("Cannot be a fixed asset item as Stock Ledger is created."))
if not self.is_fixed_asset:
asset = frappe.db.get_all("Asset", filters={"item_code": self.name, "docstatus": 1}, limit=1)
if asset:
frappe.throw(_('"Is Fixed Asset" cannot be unchecked, as Asset record exists against the item'))
def validate_retain_sample(self):
if self.retain_sample and not frappe.db.get_single_value('Stock Settings', 'sample_retention_warehouse'):
frappe.throw(_("Please select Sample Retention Warehouse in Stock Settings first"))
if self.retain_sample and not self.has_batch_no:
frappe.throw(_(" {0} Retain Sample is based on batch, please check Has Batch No to retain sample of item").format(
self.item_code))
def get_context(self, context):
context.show_search = True
context.search_link = '/product_search'
context.parents = get_parent_item_groups(self.item_group)
self.set_variant_context(context)
self.set_attribute_context(context)
self.set_disabled_attributes(context)
return context
def set_variant_context(self, context):
if self.has_variants:
context.no_cache = True
# load variants
# also used in set_attribute_context
context.variants = frappe.get_all("Item",
filters={"variant_of": self.name, "show_variant_in_website": 1},
order_by="name asc")
variant = frappe.form_dict.variant
if not variant and context.variants:
# the case when the item is opened for the first time from its list
variant = context.variants[0]
if variant:
context.variant = frappe.get_doc("Item", variant)
for fieldname in ("website_image", "web_long_description", "description",
"website_specifications"):
if context.variant.get(fieldname):
value = context.variant.get(fieldname)
if isinstance(value, list):
value = [d.as_dict() for d in value]
context[fieldname] = value
if self.slideshow:
if context.variant and context.variant.slideshow:
context.update(get_slideshow(context.variant))
else:
context.update(get_slideshow(self))
def set_attribute_context(self, context):
if self.has_variants:
attribute_values_available = {}
context.attribute_values = {}
context.selected_attributes = {}
# load attributes
for v in context.variants:
v.attributes = frappe.get_all("Item Variant Attribute",
fields=["attribute", "attribute_value"],
filters={"parent": v.name})
for attr in v.attributes:
values = attribute_values_available.setdefault(attr.attribute, [])
if attr.attribute_value not in values:
values.append(attr.attribute_value)
if v.name == context.variant.name:
context.selected_attributes[attr.attribute] = attr.attribute_value
# filter attributes, order based on attribute table
for attr in self.attributes:
values = context.attribute_values.setdefault(attr.attribute, [])
if cint(frappe.db.get_value("Item Attribute", attr.attribute, "numeric_values")):
for val in sorted(attribute_values_available.get(attr.attribute, []), key=flt):
values.append(val)
else:
# get list of values defined (for sequence)
for attr_value in frappe.db.get_all("Item Attribute Value",
fields=["attribute_value"],
filters={"parent": attr.attribute}, order_by="idx asc"):
if attr_value.attribute_value in attribute_values_available.get(attr.attribute, []):
values.append(attr_value.attribute_value)
context.variant_info = json.dumps(context.variants)
def set_disabled_attributes(self, context):
"""Disable selection options of attribute combinations that do not result in a variant"""
if not self.attributes or not self.has_variants:
return
context.disabled_attributes = {}
attributes = [attr.attribute for attr in self.attributes]
def find_variant(combination):
for variant in context.variants:
if len(variant.attributes) < len(attributes):
continue
if "combination" not in variant:
ref_combination = []
for attr in variant.attributes:
idx = attributes.index(attr.attribute)
ref_combination.insert(idx, attr.attribute_value)
variant["combination"] = ref_combination
if not (set(combination) - set(variant["combination"])):
# check if the combination is a subset of a variant combination
# eg. [Blue, 0.5] is a possible combination if exists [Blue, Large, 0.5]
return True
for i, attr in enumerate(self.attributes):
if i == 0:
continue
combination_source = []
# loop through previous attributes
for prev_attr in self.attributes[:i]:
combination_source.append([context.selected_attributes.get(prev_attr.attribute)])
combination_source.append(context.attribute_values[attr.attribute])
for combination in itertools.product(*combination_source):
if not find_variant(combination):
context.disabled_attributes.setdefault(attr.attribute, []).append(combination[-1])
def add_default_uom_in_conversion_factor_table(self):
uom_conv_list = [d.uom for d in self.get("uoms")]
if self.stock_uom not in uom_conv_list:
ch = self.append('uoms', {})
ch.uom = self.stock_uom
ch.conversion_factor = 1
to_remove = []
for d in self.get("uoms"):
if d.conversion_factor == 1 and d.uom != self.stock_uom:
to_remove.append(d)
[self.remove(d) for d in to_remove]
def update_template_tables(self):
template = frappe.get_doc("Item", self.variant_of)
# add item taxes from template
for d in template.get("taxes"):
self.append("taxes", {"tax_type": d.tax_type, "tax_rate": d.tax_rate})
# copy re-order table if empty
if not self.get("reorder_levels"):
for d in template.get("reorder_levels"):
n = {}
for k in ("warehouse", "warehouse_reorder_level",
"warehouse_reorder_qty", "material_request_type"):
n[k] = d.get(k)
self.append("reorder_levels", n)
def validate_conversion_factor(self):
check_list = []
for d in self.get('uoms'):
if cstr(d.uom) in check_list:
frappe.throw(
_("Unit of Measure {0} has been entered more than once in Conversion Factor Table").format(d.uom))
else:
check_list.append(cstr(d.uom))
if d.uom and cstr(d.uom) == cstr(self.stock_uom) and flt(d.conversion_factor) != 1:
frappe.throw(
_("Conversion factor for default Unit of Measure must be 1 in row {0}").format(d.idx))
def validate_item_type(self):
if self.has_serial_no == 1 and self.is_stock_item == 0 and not self.is_fixed_asset:
msgprint(_("'Has Serial No' can not be 'Yes' for non-stock item"), raise_exception=1)
if self.has_serial_no == 0 and self.serial_no_series:
self.serial_no_series = None
def check_for_active_boms(self):
if self.default_bom:
bom_item = frappe.db.get_value("BOM", self.default_bom, "item")
if bom_item not in (self.name, self.variant_of):
frappe.throw(
_("Default BOM ({0}) must be active for this item or its template").format(bom_item))
def fill_customer_code(self):
""" Append all the customer codes and insert into "customer_code" field of item table """
cust_code = []
for d in self.get('customer_items'):
cust_code.append(d.ref_code)
self.customer_code = ','.join(cust_code)
def check_item_tax(self):
"""Check whether Tax Rate is not entered twice for same Tax Type"""
check_list = []
for d in self.get('taxes'):
if d.tax_type:
account_type = frappe.db.get_value("Account", d.tax_type, "account_type")
if account_type not in ['Tax', 'Chargeable', 'Income Account', 'Expense Account']:
frappe.throw(
_("Item Tax Row {0} must have account of type Tax or Income or Expense or Chargeable").format(d.idx))
else:
if d.tax_type in check_list:
frappe.throw(_("{0} entered twice in Item Tax").format(d.tax_type))
else:
check_list.append(d.tax_type)
def validate_barcode(self):
from stdnum import ean
if len(self.barcodes) > 0:
for item_barcode in self.barcodes:
options = frappe.get_meta("Item Barcode").get_options("barcode_type").split('\n')
if item_barcode.barcode:
duplicate = frappe.db.sql(
"""select parent from `tabItem Barcode` where barcode = %s and parent != %s""", (item_barcode.barcode, self.name))
if duplicate:
frappe.throw(_("Barcode {0} already used in Item {1}").format(
item_barcode.barcode, duplicate[0][0]), frappe.DuplicateEntryError)
item_barcode.barcode_type = "" if item_barcode.barcode_type not in options else item_barcode.barcode_type
if item_barcode.barcode_type and item_barcode.barcode_type.upper() in ('EAN', 'UPC-A', 'EAN-13', 'EAN-8'):
if not ean.is_valid(item_barcode.barcode):
frappe.throw(_("Barcode {0} is not a valid {1} code").format(
item_barcode.barcode, item_barcode.barcode_type), InvalidBarcode)
def validate_warehouse_for_reorder(self):
'''Validate Reorder level table for duplicate and conditional mandatory'''
warehouse = []
for d in self.get("reorder_levels"):
if not d.warehouse_group:
d.warehouse_group = d.warehouse
if d.get("warehouse") and d.get("warehouse") not in warehouse:
warehouse += [d.get("warehouse")]
else:
frappe.throw(_("Row {0}: An Reorder entry already exists for this warehouse {1}")
.format(d.idx, d.warehouse), DuplicateReorderRows)
if d.warehouse_reorder_level and not d.warehouse_reorder_qty:
frappe.throw(_("Row #{0}: Please set reorder quantity").format(d.idx))
def stock_ledger_created(self):
if not hasattr(self, '_stock_ledger_created'):
self._stock_ledger_created = len(frappe.db.sql("""select name from `tabStock Ledger Entry`
where item_code = %s limit 1""", self.name))
return self._stock_ledger_created
def validate_name_with_item_group(self):
# causes problem with tree build
if frappe.db.exists("Item Group", self.name):
frappe.throw(
_("An Item Group exists with same name, please change the item name or rename the item group"))
def update_item_price(self):
frappe.db.sql("""update `tabItem Price` set item_name=%s,
item_description=%s, brand=%s where item_code=%s""",
(self.item_name, self.description, self.brand, self.name))
def on_trash(self):
super(Item, self).on_trash()
frappe.db.sql("""delete from tabBin where item_code=%s""", self.name)
frappe.db.sql("delete from `tabItem Price` where item_code=%s", self.name)
for variant_of in frappe.get_all("Item", filters={"variant_of": self.name}):
frappe.delete_doc("Item", variant_of.name)
def before_rename(self, old_name, new_name, merge=False):
if self.item_name == old_name:
frappe.db.set_value("Item", old_name, "item_name", new_name)
if merge:
# Validate properties before merging
if not frappe.db.exists("Item", new_name):
frappe.throw(_("Item {0} does not exist").format(new_name))
field_list = ["stock_uom", "is_stock_item", "has_serial_no", "has_batch_no"]
new_properties = [cstr(d) for d in frappe.db.get_value("Item", new_name, field_list)]
if new_properties != [cstr(self.get(fld)) for fld in field_list]:
frappe.throw(_("To merge, following properties must be same for both items")
+ ": \n" + ", ".join([self.meta.get_label(fld) for fld in field_list]))
def after_rename(self, old_name, new_name, merge):
if self.route:
invalidate_cache_for_item(self)
clear_cache(self.route)
frappe.db.set_value("Item", new_name, "item_code", new_name)
if merge:
self.set_last_purchase_rate(new_name)
self.recalculate_bin_qty(new_name)
for dt in ("Sales Taxes and Charges", "Purchase Taxes and Charges"):
for d in frappe.db.sql("""select name, item_wise_tax_detail from `tab{0}`
where ifnull(item_wise_tax_detail, '') != ''""".format(dt), as_dict=1):
item_wise_tax_detail = json.loads(d.item_wise_tax_detail)
if isinstance(item_wise_tax_detail, dict) and old_name in item_wise_tax_detail:
item_wise_tax_detail[new_name] = item_wise_tax_detail[old_name]
item_wise_tax_detail.pop(old_name)
frappe.db.set_value(dt, d.name, "item_wise_tax_detail",
json.dumps(item_wise_tax_detail), update_modified=False)
def set_last_purchase_rate(self, new_name):
last_purchase_rate = get_last_purchase_details(new_name).get("base_rate", 0)
frappe.db.set_value("Item", new_name, "last_purchase_rate", last_purchase_rate)
def recalculate_bin_qty(self, new_name):
from erpnext.stock.stock_balance import repost_stock
frappe.db.auto_commit_on_many_writes = 1
existing_allow_negative_stock = frappe.db.get_value("Stock Settings", None, "allow_negative_stock")
frappe.db.set_value("Stock Settings", None, "allow_negative_stock", 1)
repost_stock_for_warehouses = frappe.db.sql_list("""select distinct warehouse
from tabBin where item_code=%s""", new_name)
# Delete all existing bins to avoid duplicate bins for the same item and warehouse
frappe.db.sql("delete from `tabBin` where item_code=%s", new_name)
for warehouse in repost_stock_for_warehouses:
repost_stock(new_name, warehouse)
frappe.db.set_value("Stock Settings", None, "allow_negative_stock", existing_allow_negative_stock)
frappe.db.auto_commit_on_many_writes = 0
def copy_specification_from_item_group(self):
self.set("website_specifications", [])
if self.item_group:
for label, desc in frappe.db.get_values("Item Website Specification",
{"parent": self.item_group}, ["label", "description"]):
row = self.append("website_specifications")
row.label = label
row.description = desc
def update_bom_item_desc(self):
if self.is_new():
return
if self.db_get('description') != self.description:
frappe.db.sql("""
update `tabBOM`
set description = %s
where item = %s and docstatus < 2
""", (self.description, self.name))
frappe.db.sql("""
update `tabBOM Item`
set description = %s
where item_code = %s and docstatus < 2
""", (self.description, self.name))
frappe.db.sql("""
update `tabBOM Explosion Item`
set description = %s
where item_code = %s and docstatus < 2
""", (self.description, self.name))
def update_template_item(self):
"""Set Show in Website for Template Item if True for its Variant"""
if self.variant_of:
if self.show_in_website:
self.show_variant_in_website = 1
self.show_in_website = 0
if self.show_variant_in_website:
# show template
template_item = frappe.get_doc("Item", self.variant_of)
if not template_item.show_in_website:
template_item.show_in_website = 1
template_item.flags.dont_update_variants = True
template_item.flags.ignore_permissions = True
template_item.save()
def validate_item_defaults(self):
companies = list(set([row.company for row in self.item_defaults]))
if len(companies) != len(self.item_defaults):
frappe.throw(_("Cannot set multiple Item Defaults for a company."))
def update_defaults_from_item_group(self):
"""Get defaults from Item Group"""
if self.item_group and not self.item_defaults:
item_defaults = frappe.db.get_values("Item Default", {"parent": self.item_group},
['company', 'default_warehouse','default_price_list','buying_cost_center','default_supplier',
'expense_account','selling_cost_center','income_account'], as_dict = 1)
if item_defaults:
for item in item_defaults:
self.append('item_defaults', {
'company': item.company,
'default_warehouse': item.default_warehouse,
'default_price_list': item.default_price_list,
'buying_cost_center': item.buying_cost_center,
'default_supplier': item.default_supplier,
'expense_account': item.expense_account,
'selling_cost_center': item.selling_cost_center,
'income_account': item.income_account
})
else:
warehouse = ''
defaults = frappe.defaults.get_defaults() or {}
# To check default warehouse is belong to the default company
if defaults.get("default_warehouse") and frappe.db.exists("Warehouse",
{'name': defaults.default_warehouse, 'company': defaults.company}):
warehouse = defaults.default_warehouse
self.append("item_defaults", {
"company": defaults.get("company"),
"default_warehouse": warehouse
})
def update_variants(self):
if self.flags.dont_update_variants or \
frappe.db.get_single_value('Item Variant Settings', 'do_not_update_variants'):
return
if self.has_variants:
variants = frappe.db.get_all("Item", fields=["item_code"], filters={"variant_of": self.name})
if variants:
if len(variants) <= 30:
update_variants(variants, self, publish_progress=False)
frappe.msgprint(_("Item Variants updated"))
else:
frappe.enqueue("erpnext.stock.doctype.item.item.update_variants",
variants=variants, template=self, now=frappe.flags.in_test, timeout=600)
def validate_has_variants(self):
if not self.has_variants and frappe.db.get_value("Item", self.name, "has_variants"):
if frappe.db.exists("Item", {"variant_of": self.name}):
frappe.throw(_("Item has variants."))
def validate_stock_exists_for_template_item(self):
if self.stock_ledger_created() and self._doc_before_save:
if (cint(self._doc_before_save.has_variants) != cint(self.has_variants)
or self._doc_before_save.variant_of != self.variant_of):
frappe.throw(_("Cannot change Variant properties after stock transaction. You will have to make a new Item to do this.").format(self.name),
StockExistsForTemplate)
if self.has_variants or self.variant_of:
if not self.is_child_table_same('attributes'):
frappe.throw(
_('Cannot change Attributes after stock transaction. Make a new Item and transfer stock to the new Item'))
def validate_variant_based_on_change(self):
if not self.is_new() and (self.variant_of or (self.has_variants and frappe.get_all("Item", {"variant_of": self.name}))):
if self.variant_based_on != frappe.db.get_value("Item", self.name, "variant_based_on"):
frappe.throw(_("Variant Based On cannot be changed"))
def validate_uom(self):
if not self.get("__islocal"):
check_stock_uom_with_bin(self.name, self.stock_uom)
if self.has_variants:
for d in frappe.db.get_all("Item", filters={"variant_of": self.name}):
check_stock_uom_with_bin(d.name, self.stock_uom)
if self.variant_of:
template_uom = frappe.db.get_value("Item", self.variant_of, "stock_uom")
#if template_uom != self.stock_uom:
#frappe.throw(_("Default Unit of Measure for Variant '{0}' must be same as in Template '{1}'")
# .format(self.stock_uom, template_uom))
def validate_uom_conversion_factor(self):
if self.uoms:
for d in self.uoms:
value = get_uom_conv_factor(d.uom, self.stock_uom)
if value:
d.conversion_factor = value
def validate_attributes(self):
if not (self.has_variants or self.variant_of):
return
if not self.variant_based_on:
self.variant_based_on = 'Item Attribute'
if self.variant_based_on == 'Item Attribute':
attributes = []
if not self.attributes:
frappe.throw(_("Attribute table is mandatory"))
for d in self.attributes:
if d.attribute in attributes:
frappe.throw(
_("Attribute {0} selected multiple times in Attributes Table".format(d.attribute)))
else:
attributes.append(d.attribute)
def validate_variant_attributes(self):
if self.is_new() and self.variant_of and self.variant_based_on == 'Item Attribute':
args = {}
for d in self.attributes:
if cstr(d.attribute_value).strip() == '':
frappe.throw(_("Please specify Attribute Value for attribute {0}").format(d.attribute))
args[d.attribute] = d.attribute_value
variant = get_variant(self.variant_of, args, self.name)
if variant:
frappe.throw(_("Item variant {0} exists with same attributes")
.format(variant), ItemVariantExistsError)
validate_item_variant_attributes(self, args)
def validate_stock_for_has_batch_and_has_serial(self):
if self.stock_ledger_created():
for value in ["has_batch_no", "has_serial_no"]:
if frappe.db.get_value("Item", self.name, value) != self.get_value(value):
frappe.throw(_("Cannot change {0} as Stock Transaction for Item {1} exist.".format(value, self.name)))
def get_timeline_data(doctype, name):
'''returns timeline data based on stock ledger entry'''
out = {}
items = dict(frappe.db.sql('''select posting_date, count(*)
from `tabStock Ledger Entry` where item_code=%s
and posting_date > date_sub(curdate(), interval 1 year)
group by posting_date''', name))
for date, count in iteritems(items):
timestamp = get_timestamp(date)
out.update({timestamp: count})
return out
def validate_end_of_life(item_code, end_of_life=None, disabled=None, verbose=1):
if (not end_of_life) or (disabled is None):
end_of_life, disabled = frappe.db.get_value("Item", item_code, ["end_of_life", "disabled"])
if end_of_life and end_of_life != "0000-00-00" and getdate(end_of_life) <= now_datetime().date():
msg = _("Item {0} has reached its end of life on {1}").format(item_code, formatdate(end_of_life))
_msgprint(msg, verbose)
if disabled:
_msgprint(_("Item {0} is disabled").format(item_code), verbose)
def validate_is_stock_item(item_code, is_stock_item=None, verbose=1):
if not is_stock_item:
is_stock_item = frappe.db.get_value("Item", item_code, "is_stock_item")
if is_stock_item != 1:
msg = _("Item {0} is not a stock Item").format(item_code)
_msgprint(msg, verbose)
def validate_cancelled_item(item_code, docstatus=None, verbose=1):
if docstatus is None:
docstatus = frappe.db.get_value("Item", item_code, "docstatus")
if docstatus == 2:
msg = _("Item {0} is cancelled").format(item_code)
_msgprint(msg, verbose)
def _msgprint(msg, verbose):
if verbose:
msgprint(msg, raise_exception=True)
else:
raise frappe.ValidationError(msg)
def get_last_purchase_details(item_code, doc_name=None, conversion_rate=1.0):
"""returns last purchase details in stock uom"""
# get last purchase order item details
last_purchase_order = frappe.db.sql("""\
select po.name, po.transaction_date, po.conversion_rate,
po_item.conversion_factor, po_item.base_price_list_rate,
po_item.discount_percentage, po_item.base_rate, po_item.rate
from `tabPurchase Order` po, `tabPurchase Order Item` po_item
where po.docstatus = 1 and po_item.item_code = %s and po.name != %s and
po.name = po_item.parent
order by po.transaction_date desc, po.name desc
limit 1""", (item_code, cstr(doc_name)), as_dict=1)
# get last purchase receipt item details
last_purchase_receipt = frappe.db.sql("""\
select pr.name, pr.posting_date, pr.posting_time, pr.conversion_rate,
pr_item.conversion_factor, pr_item.base_price_list_rate, pr_item.discount_percentage,
pr_item.base_rate, pr_item.rate
from `tabPurchase Receipt` pr, `tabPurchase Receipt Item` pr_item
where pr.docstatus = 1 and pr_item.item_code = %s and pr.name != %s and
pr.name = pr_item.parent
order by pr.posting_date desc, pr.posting_time desc, pr.name desc
limit 1""", (item_code, cstr(doc_name)), as_dict=1)
purchase_order_date = getdate(last_purchase_order and last_purchase_order[0].transaction_date
or "1900-01-01")
purchase_receipt_date = getdate(last_purchase_receipt and
last_purchase_receipt[0].posting_date or "1900-01-01")
if (purchase_order_date > purchase_receipt_date) or \
(last_purchase_order and not last_purchase_receipt):
# use purchase order
last_purchase = last_purchase_order[0]
purchase_date = purchase_order_date
elif (purchase_receipt_date > purchase_order_date) or \
(last_purchase_receipt and not last_purchase_order):
# use purchase receipt
last_purchase = last_purchase_receipt[0]
purchase_date = purchase_receipt_date
else:
return frappe._dict()
conversion_factor = flt(last_purchase.conversion_factor)
out = frappe._dict({
"base_price_list_rate": flt(last_purchase.base_price_list_rate) / conversion_factor,
"base_rate": flt(last_purchase.base_rate) / conversion_factor,
"discount_percentage": flt(last_purchase.discount_percentage),
"purchase_date": purchase_date,
"rate":flt(last_purchase.rate)
})
conversion_rate = flt(conversion_rate) or 1.0
out.update({
"price_list_rate": out.base_price_list_rate / conversion_rate,
"base_rate": out.base_rate
})
return out
def invalidate_cache_for_item(doc):
invalidate_cache_for(doc, doc.item_group)
website_item_groups = list(set((doc.get("old_website_item_groups") or [])
+ [d.item_group for d in doc.get({"doctype": "Website Item Group"}) if d.item_group]))
for item_group in website_item_groups:
invalidate_cache_for(doc, item_group)
if doc.get("old_item_group") and doc.get("old_item_group") != doc.item_group and frappe.db.exists({"doctype": "Item Group","name": doc.old_item_group}):
invalidate_cache_for(doc, doc.old_item_group)
def check_stock_uom_with_bin(item, stock_uom):
if stock_uom == frappe.db.get_value("Item", item, "stock_uom"):
return
matched = True
ref_uom = frappe.db.get_value("Stock Ledger Entry",
{"item_code": item}, "stock_uom")
if ref_uom:
if cstr(ref_uom) != cstr(stock_uom):
matched = False
else:
bin_list = frappe.db.sql("select * from tabBin where item_code=%s", item, as_dict=1)
for bin in bin_list:
if (bin.reserved_qty > 0 or bin.ordered_qty > 0 or bin.indented_qty > 0
or bin.planned_qty > 0) and cstr(bin.stock_uom) != cstr(stock_uom):
matched = False
break
if matched and bin_list:
frappe.db.sql("""update tabBin set stock_uom=%s where item_code=%s""", (stock_uom, item))
if not matched:
frappe.throw(
_("Default Unit of Measure for Item {0} cannot be changed directly because you have already made some transaction(s) with another UOM. You will need to create a new Item to use a different Default UOM.").format(item))
def get_item_defaults(item_code, company):
item = frappe.get_cached_doc('Item', item_code)
out = item.as_dict()
for d in item.item_defaults:
if d.company == company:
row = copy.deepcopy(d.as_dict())
row.pop("name")
out.update(row)
return out
def set_item_default(item_code, company, fieldname, value):
item = frappe.get_cached_doc('Item', item_code)
for d in item.item_defaults:
if d.company == company:
if not d.get(fieldname):
frappe.db.set_value(d.doctype, d.name, fieldname, value)
return
# no row found, add a new row for the company
d = item.append('item_defaults', {fieldname: value, "company": company})
d.db_insert()
item.clear_cache()
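# Return the factor for converting `uom` into `stock_uom`, taken from a direct UOM Conversion Factor
# row or derived through a base UOM common to both.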
@frappe.whitelist()
def get_uom_conv_factor(uom, stock_uom):
uoms = [uom, stock_uom]
value = ""
uom_details = frappe.db.sql("""select to_uom, from_uom, value from `tabUOM Conversion Factor`\
where to_uom in ({0})
""".format(', '.join(['"' + frappe.db.escape(i, percent=False) + '"' for i in uoms])), as_dict=True)
for d in uom_details:
if d.from_uom == stock_uom and d.to_uom == uom:
value = 1/flt(d.value)
elif d.from_uom == uom and d.to_uom == stock_uom:
value = d.value
if not value:
uom_stock = frappe.db.get_value("UOM Conversion Factor", {"to_uom": stock_uom}, ["from_uom", "value"], as_dict=1)
uom_row = frappe.db.get_value("UOM Conversion Factor", {"to_uom": uom}, ["from_uom", "value"], as_dict=1)
if uom_stock and uom_row:
if uom_stock.from_uom == uom_row.from_uom:
value = flt(uom_stock.value) * 1/flt(uom_row.value)
return value
@frappe.whitelist()
def get_item_attribute(parent, attribute_value=''):
if not frappe.has_permission("Item"):
frappe.msgprint(_("No Permission"), raise_exception=1)
return frappe.get_all("Item Attribute Value", fields = ["attribute_value"],
filters = {'parent': parent, 'attribute_value': ("like", "%%%s%%" % attribute_value)})
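# Whitelisted endpoint: create and submit a "Material Transfer" Material Request for the given item,
# quantity and target warehouse, expressing the quantity in the item's purchase UOM when one is set.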
@frappe.whitelist()
def set_item_transfer(item_code,qty,warehouse):
if item_code and qty and warehouse:
qty = flt(qty)
company = frappe.db.get_single_value('Global Defaults', 'default_company')
mr = frappe.new_doc("Material Request")
mr.update({
"company": company,
"transaction_date": nowdate(),
"warehouse": warehouse,
"material_request_type": "Material Transfer"
})
item = frappe.get_doc("Item",item_code)
uom = item.stock_uom
conversion_factor = 1.0
uom = item.purchase_uom or item.stock_uom
if uom != item.stock_uom:
conversion_factor = frappe.db.get_value("UOM Conversion Detail",
{'parent': item.name, 'uom': uom}, 'conversion_factor') or 1.0
mr.append("items", {
"doctype": "Material Request Item",
"item_code": item.item_code,
"schedule_date": nowdate(),
"qty": qty / conversion_factor,
"uom": uom,
"stock_uom": item.stock_uom,
"warehouse": warehouse,
"item_name": item.item_name,
"description": item.description,
"item_group": item.item_group,
"brand": item.brand,
})
mr.schedule_date = nowdate()
mr.insert()
mr.submit()
return "Demande enregistree"
else:
return "---- Verifier les donnees qts et article -----"
@frappe.whitelist()
def set_item_demande(item_code,qty):
if item_code and qty:
qty = flt(qty)
company = frappe.db.get_single_value('Global Defaults', 'default_company')
mr = frappe.new_doc("Material Request")
mr.update({
"company": company,
"transaction_date": nowdate(),
"material_request_type": "Purchase"
})
item = frappe.get_doc("Item",item_code)
uom = item.stock_uom
conversion_factor = 1.0
uom = item.purchase_uom or item.stock_uom
if uom != item.stock_uom:
conversion_factor = frappe.db.get_value("UOM Conversion Detail",
{'parent': item.name, 'uom': uom}, 'conversion_factor') or 1.0
mr.append("items", {
"doctype": "Material Request Item",
"item_code": item.item_code,
"schedule_date": nowdate(),
"qty": qty / conversion_factor,
"uom": uom,
"stock_uom": item.stock_uom,
"warehouse": "GLOBAL - MV",
"item_name": item.item_name,
"description": item.description,
"item_group": item.item_group,
"brand": item.brand,
})
mr.schedule_date = nowdate()
mr.insert()
mr.submit()
return "Demande enregistree"
else:
return "---- Verifier les donnees qts et article -----"
@frappe.whitelist()
def set_item_achat(item_code):
if item_code:
item = frappe.get_doc("Item",item_code)
if item:
if item.is_purchase_item:
item.is_purchase_item = 0
else:
item.is_purchase_item = 1
item.save()
return "ACHAT : %s" % item.is_purchase_item
def update_variants(variants, template, publish_progress=True):
count=0
	# Components
if template.articles:
for comp in template.articles:
if comp.item:
other_comp = frappe.get_doc("Item",comp.item)
if other_comp.has_variants and template.name not in {a.item for a in other_comp.articles}:
row = other_comp.append('articles',{})
row.item = template.name
other_comp.save()
for d in variants:
variant = frappe.get_doc("Item", d)
copy_attributes_to_variant(template, variant)
variant.sync_comp()
variant.save()
count+=1
if publish_progress:
frappe.publish_progress(count*100/len(variants), title = _("Updating Variants..."))
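# Whitelisted endpoint: hard-delete a Sales Order Item row and refresh the reserved quantity in the
# matching Bin.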
@frappe.whitelist()
def delete_order_item(item_code):
if item_code:
item = frappe.get_doc("Sales Order Item",item_code)
frappe.db.sql("""delete from `tabSales Order Item` where name = %s""", (item_code))
update_bin_qty(item.item_code, item.warehouse, {
"reserved_qty": get_reserved_qty(item.item_code, item.warehouse)
})
return "Article %s est Supprime" % (item_code)
@frappe.whitelist()
def bulk_print_list(names):
if names:
names = {"names":names.split(",")}
bulk_print_memberships(json.dumps(names))
return "ok"
@frappe.whitelist()
def bulk_print_memberships(names):
names = json.loads(names)
if names and 'names' in names:
		names = names['names']
|
"""
Test split modulestore w/o using any django stuff.
"""
from mock import patch
import datetime
from importlib import import_module
from path import Path as path
import random
import re
import unittest
import uuid
from contracts import contract
from nose.plugins.attrib import attr
from django.core.cache import get_cache, InvalidCacheBackendError
from openedx.core.lib import tempdir
from xblock.fields import Reference, ReferenceList, ReferenceValueDict
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.exceptions import (
ItemNotFoundError, VersionConflictError,
DuplicateItemError, DuplicateCourseError,
InsufficientSpecificationError
)
from opaque_keys.edx.locator import CourseLocator, BlockUsageLocator, VersionTree, LocalId
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.x_module import XModuleMixin
from xmodule.fields import Date, Timedelta
from xmodule.modulestore.split_mongo.split import SplitMongoModuleStore
from xmodule.modulestore.tests.test_modulestore import check_has_course_method
from xmodule.modulestore.split_mongo import BlockKey
from xmodule.modulestore.tests.factories import check_mongo_calls
from xmodule.modulestore.tests.mongo_connection import MONGO_PORT_NUM, MONGO_HOST
from xmodule.modulestore.tests.utils import mock_tab_from_json
from xmodule.modulestore.edit_info import EditInfoMixin
BRANCH_NAME_DRAFT = ModuleStoreEnum.BranchName.draft
BRANCH_NAME_PUBLISHED = ModuleStoreEnum.BranchName.published
@attr('mongo')
class SplitModuleTest(unittest.TestCase):
'''
The base set of tests manually populates a db w/ courses which have
versions. It creates unique collection names and removes them after all
tests finish.
'''
# Snippets of what would be in the django settings envs file
DOC_STORE_CONFIG = {
'host': MONGO_HOST,
'db': 'test_xmodule',
'port': MONGO_PORT_NUM,
'collection': 'modulestore{0}'.format(uuid.uuid4().hex[:5]),
}
modulestore_options = {
'default_class': 'xmodule.raw_module.RawDescriptor',
'fs_root': tempdir.mkdtemp_clean(),
'xblock_mixins': (InheritanceMixin, XModuleMixin, EditInfoMixin)
}
MODULESTORE = {
'ENGINE': 'xmodule.modulestore.split_mongo.split.SplitMongoModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
}
# don't create django dependency; so, duplicates common.py in envs
match = re.search(r'(.*?/common)(?:$|/)', path(__file__))
COMMON_ROOT = match.group(1)
modulestore = None
_date_field = Date()
_time_delta_field = Timedelta()
COURSE_CONTENT = {
"testx.GreekHero": {
"org": "testx",
"course": "GreekHero",
"run": "run",
"root_block_id": "head12345",
"user_id": "test@edx.org",
"fields": {
"tabs": [
{
"type": "courseware"
},
{
"type": "course_info",
"name": "Course Info"
},
{
"type": "discussion",
"name": "Discussion"
},
{
"type": "wiki",
"name": "Wiki"
}
],
"start": _date_field.from_json("2013-02-14T05:00"),
"display_name": "The Ancient Greek Hero",
"grading_policy": {
"GRADER": [
{
"min_count": 5,
"weight": 0.15,
"type": "Homework",
"drop_count": 1,
"short_label": "HWa"
},
{
"short_label": "",
"min_count": 2,
"type": "Lab",
"drop_count": 0,
"weight": 0.15
},
{
"short_label": "Midterm",
"min_count": 1,
"type": "Midterm Exam",
"drop_count": 0,
"weight": 0.3
},
{
"short_label": "Final",
"min_count": 1,
"type": "Final Exam",
"drop_count": 0,
"weight": 0.4
}
],
"GRADE_CUTOFFS": {
"Pass": 0.75
},
},
},
"revisions": [
{
"user_id": "testassist@edx.org",
"update": {
("course", "head12345"): {
"end": _date_field.from_json("2013-04-13T04:30"),
"tabs": [
{
"type": "courseware"
},
{
"type": "course_info",
"name": "Course Info"
},
{
"type": "discussion",
"name": "Discussion"
},
{
"type": "wiki",
"name": "Wiki"
},
{
"type": "static_tab",
"name": "Syllabus",
"url_slug": "01356a17b5924b17a04b7fc2426a3798"
},
{
"type": "static_tab",
"name": "Advice for Students",
"url_slug": "57e9991c0d794ff58f7defae3e042e39"
}
],
"graceperiod": _time_delta_field.from_json("2 hours 0 minutes 0 seconds"),
"grading_policy": {
"GRADER": [
{
"min_count": 5,
"weight": 0.15,
"type": "Homework",
"drop_count": 1,
"short_label": "HWa"
},
{
"short_label": "",
"min_count": 12,
"type": "Lab",
"drop_count": 2,
"weight": 0.15
},
{
"short_label": "Midterm",
"min_count": 1,
"type": "Midterm Exam",
"drop_count": 0,
"weight": 0.3
},
{
"short_label": "Final",
"min_count": 1,
"type": "Final Exam",
"drop_count": 0,
"weight": 0.4
}
],
"GRADE_CUTOFFS": {
"Pass": 0.55
}
},
}
}
},
{
"user_id": "testassist@edx.org",
"update": {
("course", "head12345"): {
"end": _date_field.from_json("2013-06-13T04:30"),
"grading_policy": {
"GRADER": [
{
"min_count": 4,
"weight": 0.15,
"type": "Homework",
"drop_count": 2,
"short_label": "HWa"
},
{
"short_label": "",
"min_count": 12,
"type": "Lab",
"drop_count": 2,
"weight": 0.15
},
{
"short_label": "Midterm",
"min_count": 1,
"type": "Midterm Exam",
"drop_count": 0,
"weight": 0.3
},
{
"short_label": "Final",
"min_count": 1,
"type": "Final Exam",
"drop_count": 0,
"weight": 0.4
}
],
"GRADE_CUTOFFS": {
"Pass": 0.45
}
},
"enrollment_start": _date_field.from_json("2013-01-01T05:00"),
"enrollment_end": _date_field.from_json("2013-03-02T05:00"),
"advertised_start": "Fall 2013",
}
},
"create": [
{
"id": "chapter1",
"parent": "head12345",
"parent_type": "course",
"category": "chapter",
"fields": {
"display_name": "Hercules"
},
},
{
"id": "chapter2",
"parent": "head12345",
"parent_type": "course",
"category": "chapter",
"fields": {
"display_name": "Hera heckles Hercules"
},
},
{
"id": "chapter3",
"parent": "head12345",
"parent_type": "course",
"category": "chapter",
"fields": {
"display_name": "Hera cuckolds Zeus"
},
},
{
"id": "problem1",
"parent": "chapter3",
"parent_type": "chapter",
"category": "problem",
"fields": {
"display_name": "Problem 3.1",
"graceperiod": _time_delta_field.from_json("4 hours 0 minutes 0 seconds"),
},
},
{
"id": "problem3_2",
"parent": "chapter3",
"parent_type": "chapter",
"category": "problem",
"fields": {
"display_name": "Problem 3.2"
},
},
{
"id": "problem32",
"parent": "chapter3",
"parent_type": "chapter",
"category": "problem",
"fields": {
"display_name": "Problem 3.3",
"group_access": {"3": ["33"]},
},
}
]
},
]
},
"testx.wonderful": {
"org": "testx",
"course": "wonderful",
"run": "run",
"root_block_id": "head23456",
"user_id": "test@edx.org",
"fields": {
"tabs": [
{
"type": "courseware"
},
{
"type": "course_info",
"name": "Course Info"
},
{
"type": "discussion",
"name": "Discussion"
},
{
"type": "wiki",
"name": "Wiki"
}
],
"start": _date_field.from_json("2013-02-14T05:00"),
"display_name": "A wonderful course",
"grading_policy": {
"GRADER": [
{
"min_count": 14,
"weight": 0.25,
"type": "Homework",
"drop_count": 1,
"short_label": "HWa"
},
{
"short_label": "",
"min_count": 12,
"type": "Lab",
"drop_count": 2,
"weight": 0.25
},
{
"short_label": "Midterm",
"min_count": 1,
"type": "Midterm Exam",
"drop_count": 0,
"weight": 0.2
},
{
"short_label": "Final",
"min_count": 1,
"type": "Final Exam",
"drop_count": 0,
"weight": 0.3
}
],
"GRADE_CUTOFFS": {
"Pass": 0.95
}
},
},
"revisions": [
{
"user_id": "test@edx.org",
"update": {
("course", "head23456"): {
"display_name": "The most wonderful course",
"grading_policy": {
"GRADER": [
{
"min_count": 14,
"weight": 0.25,
"type": "Homework",
"drop_count": 1,
"short_label": "HWa"
},
{
"short_label": "",
"min_count": 12,
"type": "Lab",
"drop_count": 2,
"weight": 0.25
},
{
"short_label": "Midterm",
"min_count": 1,
"type": "Midterm Exam",
"drop_count": 0,
"weight": 0.2
},
{
"short_label": "Final",
"min_count": 1,
"type": "Final Exam",
"drop_count": 0,
"weight": 0.3
}
],
"GRADE_CUTOFFS": {
"Pass": 0.45
}
},
}
}
}
]
},
"guestx.contender": {
"org": "guestx",
"course": "contender",
"run": "run",
"root_block_id": "head345679",
"user_id": "test@guestx.edu",
"fields": {
"tabs": [
{
"type": "courseware"
},
{
"type": "course_info",
"name": "Course Info"
},
{
"type": "discussion",
"name": "Discussion"
},
{
"type": "wiki",
"name": "Wiki"
}
],
"start": _date_field.from_json("2013-03-14T05:00"),
"display_name": "Yet another contender",
"grading_policy": {
"GRADER": [
{
"min_count": 4,
"weight": 0.25,
"type": "Homework",
"drop_count": 0,
"short_label": "HW"
},
{
"short_label": "Midterm",
"min_count": 1,
"type": "Midterm Exam",
"drop_count": 0,
"weight": 0.4
},
{
"short_label": "Final",
"min_count": 1,
"type": "Final Exam",
"drop_count": 0,
"weight": 0.35
}
],
"GRADE_CUTOFFS": {
"Pass": 0.25
}
},
}
},
}
@staticmethod
def bootstrapDB(split_store): # pylint: disable=invalid-name
'''
Sets up the initial data into the db
'''
for _course_id, course_spec in SplitModuleTest.COURSE_CONTENT.iteritems():
course = split_store.create_course(
course_spec['org'],
course_spec['course'],
course_spec['run'],
course_spec['user_id'],
master_branch=BRANCH_NAME_DRAFT,
fields=course_spec['fields'],
root_block_id=course_spec['root_block_id']
)
for revision in course_spec.get('revisions', []):
for (block_type, block_id), fields in revision.get('update', {}).iteritems():
# cheat since course is most frequent
if course.location.block_id == block_id:
block = course
else:
# not easy to figure out the category but get_item won't care
block_usage = BlockUsageLocator.make_relative(course.location, block_type, block_id)
block = split_store.get_item(block_usage)
for key, value in fields.iteritems():
setattr(block, key, value)
# create new blocks into dag: parent must already exist; thus, order is important
new_ele_dict = {}
for spec in revision.get('create', []):
if spec['parent'] in new_ele_dict:
parent = new_ele_dict.get(spec['parent'])
elif spec['parent'] == course.location.block_id:
parent = course
else:
block_usage = BlockUsageLocator.make_relative(course.location, spec['parent_type'], spec['parent'])
parent = split_store.get_item(block_usage)
block_id = LocalId(spec['id'])
child = split_store.create_xblock(
course.runtime, course.id, spec['category'], block_id, spec['fields'], parent_xblock=parent
)
new_ele_dict[spec['id']] = child
course = split_store.persist_xblock_dag(course, revision['user_id'])
# publish "testx.wonderful"
source_course = CourseLocator(org="testx", course="wonderful", run="run", branch=BRANCH_NAME_DRAFT)
to_publish = BlockUsageLocator(
source_course,
block_type='course',
block_id="head23456"
)
destination = CourseLocator(org="testx", course="wonderful", run="run", branch=BRANCH_NAME_PUBLISHED)
split_store.copy("test@edx.org", source_course, destination, [to_publish], None)
def setUp(self):
super(SplitModuleTest, self).setUp()
self.user_id = random.getrandbits(32)
def tearDown(self):
"""
Clear persistence between each test.
"""
collection_prefix = SplitModuleTest.MODULESTORE['DOC_STORE_CONFIG']['collection'] + '.'
if SplitModuleTest.modulestore:
for collection in ('active_versions', 'structures', 'definitions'):
modulestore().db.drop_collection(collection_prefix + collection)
# drop the modulestore to force re init
SplitModuleTest.modulestore = None
super(SplitModuleTest, self).tearDown()
def findByIdInResult(self, collection, _id): # pylint: disable=invalid-name
"""
Result is a collection of descriptors. Find the one whose block id
matches the _id.
"""
for element in collection:
if element.location.block_id == _id:
return element
class TestHasChildrenAtDepth(SplitModuleTest):
"""Test the has_children_at_depth method of XModuleMixin. """
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_has_children_at_depth(self, _from_json):
course_locator = CourseLocator(
org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT
)
block_locator = BlockUsageLocator(
course_locator, 'course', 'head12345'
)
block = modulestore().get_item(block_locator)
self.assertRaises(
ValueError, block.has_children_at_depth, -1,
)
self.assertTrue(block.has_children_at_depth(0))
self.assertTrue(block.has_children_at_depth(1))
self.assertFalse(block.has_children_at_depth(2))
ch1 = modulestore().get_item(
BlockUsageLocator(course_locator, 'chapter', block_id='chapter1')
)
self.assertFalse(ch1.has_children_at_depth(0))
ch2 = modulestore().get_item(
BlockUsageLocator(course_locator, 'chapter', block_id='chapter2')
)
self.assertFalse(ch2.has_children_at_depth(0))
ch3 = modulestore().get_item(
BlockUsageLocator(course_locator, 'chapter', block_id='chapter3')
)
self.assertTrue(ch3.has_children_at_depth(0))
self.assertFalse(ch3.has_children_at_depth(1))
class SplitModuleCourseTests(SplitModuleTest):
'''
Course CRUD operation tests
'''
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_get_courses(self, _from_json):
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT)
# should have gotten 3 draft courses
self.assertEqual(len(courses), 3, "Wrong number of courses")
# check metadata -- NOTE no promised order
course = self.findByIdInResult(courses, "head12345")
self.assertEqual(course.location.org, "testx")
self.assertEqual(course.category, 'course', 'wrong category')
self.assertEqual(len(course.tabs), 6, "wrong number of tabs")
self.assertEqual(
course.display_name, "The Ancient Greek Hero",
"wrong display name"
)
self.assertEqual(
course.advertised_start, "Fall 2013",
"advertised_start"
)
self.assertEqual(len(course.children), 3, "children")
# check dates and graders--forces loading of descriptor
self.assertEqual(course.edited_by, "testassist@edx.org")
self.assertDictEqual(course.grade_cutoffs, {"Pass": 0.45})
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_get_org_courses(self, _from_json):
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT, org='guestx')
# should have gotten 1 draft courses
self.assertEqual(len(courses), 1)
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT, org='testx')
# should have gotten 2 draft courses
self.assertEqual(len(courses), 2)
# although this is already covered in other tests, let's
# also not pass in org= parameter to make sure we get back
# 3 courses
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT)
self.assertEqual(len(courses), 3)
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_branch_requests(self, _from_json):
# query w/ branch qualifier (both draft and published)
def _verify_published_course(courses_published):
""" Helper function for verifying published course. """
self.assertEqual(len(courses_published), 1, len(courses_published))
course = self.findByIdInResult(courses_published, "head23456")
self.assertIsNotNone(course, "published courses")
self.assertEqual(course.location.course_key.org, "testx")
self.assertEqual(course.location.course_key.course, "wonderful")
self.assertEqual(course.category, 'course', 'wrong category')
self.assertEqual(len(course.tabs), 4, "wrong number of tabs")
self.assertEqual(course.display_name, "The most wonderful course",
course.display_name)
self.assertIsNone(course.advertised_start)
self.assertEqual(len(course.children), 0,
"children")
_verify_published_course(modulestore().get_courses(branch=BRANCH_NAME_PUBLISHED))
def test_has_course(self):
'''
Test the various calling forms for has_course
'''
check_has_course_method(
modulestore(),
CourseLocator(org='testx', course='wonderful', run="run", branch=BRANCH_NAME_DRAFT),
locator_key_fields=['org', 'course', 'run']
)
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_get_course(self, _from_json):
'''
Test the various calling forms for get_course
'''
locator = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT)
head_course = modulestore().get_course(locator)
self.assertNotEqual(head_course.location.version_guid, head_course.previous_version)
locator = CourseLocator(version_guid=head_course.previous_version)
course = modulestore().get_course(locator)
self.assertIsNone(course.location.course_key.org)
self.assertEqual(course.location.version_guid, head_course.previous_version)
self.assertEqual(course.category, 'course')
self.assertEqual(len(course.tabs), 6)
self.assertEqual(course.display_name, "The Ancient Greek Hero")
self.assertEqual(course.graceperiod, datetime.timedelta(hours=2))
self.assertIsNone(course.advertised_start)
self.assertEqual(len(course.children), 0)
self.assertNotEqual(course.definition_locator.definition_id, head_course.definition_locator.definition_id)
# check dates and graders--forces loading of descriptor
self.assertEqual(course.edited_by, "testassist@edx.org")
self.assertDictEqual(course.grade_cutoffs, {"Pass": 0.55})
locator = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT)
course = modulestore().get_course(locator)
self.assertEqual(course.location.course_key.org, "testx")
self.assertEqual(course.location.course_key.course, "GreekHero")
self.assertEqual(course.location.course_key.run, "run")
self.assertEqual(course.category, 'course')
self.assertEqual(len(course.tabs), 6)
self.assertEqual(course.display_name, "The Ancient Greek Hero")
self.assertEqual(course.advertised_start, "Fall 2013")
self.assertEqual(len(course.children), 3)
# check dates and graders--forces loading of descriptor
self.assertEqual(course.edited_by, "testassist@edx.org")
self.assertDictEqual(course.grade_cutoffs, {"Pass": 0.45})
locator = CourseLocator(org='testx', course='wonderful', run="run", branch=BRANCH_NAME_PUBLISHED)
course = modulestore().get_course(locator)
published_version = course.location.version_guid
locator = CourseLocator(org='testx', course='wonderful', run="run", branch=BRANCH_NAME_DRAFT)
course = modulestore().get_course(locator)
self.assertNotEqual(course.location.version_guid, published_version)
def test_get_course_negative(self):
# Now negative testing
with self.assertRaises(InsufficientSpecificationError):
modulestore().get_course(CourseLocator(org='edu', course='meh', run='blah'))
with self.assertRaises(ItemNotFoundError):
modulestore().get_course(CourseLocator(org='edu', course='nosuchthing', run="run", branch=BRANCH_NAME_DRAFT))
with self.assertRaises(ItemNotFoundError):
modulestore().get_course(CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_PUBLISHED))
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_cache(self, _from_json):
"""
Test that the mechanics of caching work.
"""
locator = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT)
course = modulestore().get_course(locator)
block_map = modulestore().cache_items(
course.system, [BlockKey.from_usage_key(child) for child in course.children], course.id, depth=3
)
self.assertIn(BlockKey('chapter', 'chapter1'), block_map)
self.assertIn(BlockKey('problem', 'problem3_2'), block_map)
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_course_successors(self, _from_json):
"""
get_course_successors(course_locator, version_history_depth=1)
"""
locator = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT)
course = modulestore().get_course(locator)
versions = [course.location.version_guid, course.previous_version]
locator = CourseLocator(version_guid=course.previous_version)
course = modulestore().get_course(locator)
versions.append(course.previous_version)
locator = CourseLocator(version_guid=course.previous_version)
result = modulestore().get_course_successors(locator)
self.assertIsInstance(result, VersionTree)
self.assertIsNone(result.locator.org)
self.assertEqual(result.locator.version_guid, versions[-1])
self.assertEqual(len(result.children), 1)
self.assertEqual(result.children[0].locator.version_guid, versions[-2])
self.assertEqual(len(result.children[0].children), 0, "descended more than one level")
result = modulestore().get_course_successors(locator, version_history_depth=2)
self.assertEqual(len(result.children), 1)
self.assertEqual(result.children[0].locator.version_guid, versions[-2])
self.assertEqual(len(result.children[0].children), 1)
result = modulestore().get_course_successors(locator, version_history_depth=99)
self.assertEqual(len(result.children), 1)
self.assertEqual(result.children[0].locator.version_guid, versions[-2])
self.assertEqual(len(result.children[0].children), 1)
self.assertEqual(result.children[0].children[0].locator.version_guid, versions[0])
class TestCourseStructureCache(SplitModuleTest):
"""Tests for the CourseStructureCache"""
def setUp(self):
# use the default cache, since the `course_structure_cache`
# is a dummy cache during testing
self.cache = get_cache('default')
# make sure we clear the cache before every test...
self.cache.clear()
# ... and after
self.addCleanup(self.cache.clear)
# make a new course:
self.user = random.getrandbits(32)
self.new_course = modulestore().create_course(
'org', 'course', 'test_run', self.user, BRANCH_NAME_DRAFT,
)
super(TestCourseStructureCache, self).setUp()
@patch('xmodule.modulestore.split_mongo.mongo_connection.get_cache')
def test_course_structure_cache(self, mock_get_cache):
# force get_cache to return the default cache so we can test
# its caching behavior
mock_get_cache.return_value = self.cache
with check_mongo_calls(1):
not_cached_structure = self._get_structure(self.new_course)
# when cache is warmed, we should have one fewer mongo call
with check_mongo_calls(0):
cached_structure = self._get_structure(self.new_course)
# now make sure that you get the same structure
self.assertEqual(cached_structure, not_cached_structure)
@patch('xmodule.modulestore.split_mongo.mongo_connection.get_cache')
def test_course_structure_cache_no_cache_configured(self, mock_get_cache):
mock_get_cache.side_effect = InvalidCacheBackendError
with check_mongo_calls(1):
not_cached_structure = self._get_structure(self.new_course)
# if the cache isn't configured, we expect to have to make
# another mongo call here if we want the same course structure
with check_mongo_calls(1):
cached_structure = self._get_structure(self.new_course)
# now make sure that you get the same structure
self.assertEqual(cached_structure, not_cached_structure)
def test_dummy_cache(self):
with check_mongo_calls(1):
not_cached_structure = self._get_structure(self.new_course)
# Since the test is using the dummy cache, it's not actually caching
# anything
with check_mongo_calls(1):
cached_structure = self._get_structure(self.new_course)
# now make sure that you get the same structure
self.assertEqual(cached_structure, not_cached_structure)
def _get_structure(self, course):
"""
Helper function to get a structure from a course.
"""
return modulestore().db_connection.get_structure(
course.location.as_object_id(course.location.version_guid)
)
class SplitModuleItemTests(SplitModuleTest):
'''
Item read tests including inheritance
'''
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_has_item(self, _from_json):
'''
has_item(BlockUsageLocator)
'''
org = 'testx'
course = 'GreekHero'
run = 'run'
course_locator = CourseLocator(org=org, course=course, run=run, branch=BRANCH_NAME_DRAFT)
course = modulestore().get_course(course_locator)
previous_version = course.previous_version
# positive tests of various forms
locator = course.location.map_into_course(CourseLocator(version_guid=previous_version))
self.assertTrue(
modulestore().has_item(locator), "couldn't find in %s" % previous_version
)
locator = course.location.version_agnostic()
self.assertTrue(
modulestore().has_item(locator),
)
self.assertFalse(
modulestore().has_item(
BlockUsageLocator(
locator.course_key.for_branch(BRANCH_NAME_PUBLISHED),
block_type=locator.block_type,
block_id=locator.block_id
)
),
"found in published head"
)
# not a course obj
locator = BlockUsageLocator(course_locator, block_type='chapter', block_id='chapter1')
self.assertTrue(
modulestore().has_item(locator),
"couldn't find chapter1"
)
# in published course
locator = BlockUsageLocator(
CourseLocator(org="testx", course="wonderful", run="run", branch=BRANCH_NAME_DRAFT),
block_type="course",
block_id="head23456"
)
self.assertTrue(
modulestore().has_item(locator.for_branch(BRANCH_NAME_PUBLISHED))
)
def test_negative_has_item(self):
# negative tests--not found
# no such course or block
locator = BlockUsageLocator(
CourseLocator(org="foo", course="doesnotexist", run="run", branch=BRANCH_NAME_DRAFT),
block_type="course",
block_id="head23456"
)
self.assertFalse(modulestore().has_item(locator))
locator = BlockUsageLocator(
CourseLocator(org="testx", course="wonderful", run="run", branch=BRANCH_NAME_DRAFT),
block_type="vertical",
block_id="doesnotexist"
)
self.assertFalse(modulestore().has_item(locator))
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_get_item(self, _from_json):
'''
get_item(blocklocator)
'''
hero_locator = CourseLocator(org="testx", course="GreekHero", run="run", branch=BRANCH_NAME_DRAFT)
course = modulestore().get_course(hero_locator)
previous_version = course.previous_version
# positive tests of various forms
locator = course.location.map_into_course(CourseLocator(version_guid=previous_version))
block = modulestore().get_item(locator)
self.assertIsInstance(block, CourseDescriptor)
self.assertIsInstance(modulestore().get_item(locator), CourseDescriptor)
def verify_greek_hero(block):
"""
Check contents of block
"""
self.assertEqual(block.location.org, "testx")
self.assertEqual(block.location.course, "GreekHero")
self.assertEqual(block.location.run, "run")
self.assertEqual(len(block.tabs), 6, "wrong number of tabs")
self.assertEqual(block.display_name, "The Ancient Greek Hero")
self.assertEqual(block.advertised_start, "Fall 2013")
self.assertEqual(len(block.children), 3)
# check dates and graders--forces loading of descriptor
self.assertEqual(block.edited_by, "testassist@edx.org")
self.assertDictEqual(
block.grade_cutoffs, {"Pass": 0.45},
)
verify_greek_hero(modulestore().get_item(course.location))
# try to look up other branches
with self.assertRaises(ItemNotFoundError):
modulestore().get_item(course.location.for_branch(BRANCH_NAME_PUBLISHED))
def test_get_non_root(self):
# not a course obj
locator = BlockUsageLocator(
CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT), 'chapter', 'chapter1'
)
block = modulestore().get_item(locator)
self.assertEqual(block.location.org, "testx")
self.assertEqual(block.location.course, "GreekHero")
self.assertEqual(block.category, 'chapter')
self.assertEqual(block.display_name, "Hercules")
self.assertEqual(block.edited_by, "testassist@edx.org")
# in published course
locator = BlockUsageLocator(
CourseLocator(org='testx', course='wonderful', run="run", branch=BRANCH_NAME_PUBLISHED), 'course', 'head23456'
)
self.assertIsInstance(
modulestore().get_item(locator),
CourseDescriptor
)
# negative tests--not found
# no such course or block
locator = BlockUsageLocator(
CourseLocator(org='doesnotexist', course='doesnotexist', run="run", branch=BRANCH_NAME_DRAFT), 'course', 'head23456'
)
with self.assertRaises(ItemNotFoundError):
modulestore().get_item(locator)
locator = BlockUsageLocator(
CourseLocator(org='testx', course='wonderful', run="run", branch=BRANCH_NAME_DRAFT), 'html', 'doesnotexist'
)
with self.assertRaises(ItemNotFoundError):
modulestore().get_item(locator)
# pylint: disable=protected-access
def test_matching(self):
'''
test the block and value matches help functions
'''
self.assertTrue(modulestore()._value_matches('help', 'help'))
self.assertFalse(modulestore()._value_matches('help', 'Help'))
self.assertTrue(modulestore()._value_matches(['distract', 'help', 'notme'], 'help'))
self.assertFalse(modulestore()._value_matches(['distract', 'Help', 'notme'], 'help'))
self.assertFalse(modulestore()._block_matches({'field': ['distract', 'Help', 'notme']}, {'field': 'help'}))
self.assertTrue(modulestore()._block_matches(
{'field': ['distract', 'help', 'notme'],
'irrelevant': 2},
{'field': 'help'}))
self.assertTrue(modulestore()._value_matches('I need some help', re.compile(r'help')))
self.assertTrue(modulestore()._value_matches(['I need some help', 'today'], re.compile(r'help')))
self.assertFalse(modulestore()._value_matches('I need some help', re.compile(r'Help')))
self.assertTrue(modulestore()._value_matches(['I need some help', 'today'], re.compile(r'Help', re.IGNORECASE)))
self.assertTrue(modulestore()._value_matches('gotcha', {'$in': ['a', 'bunch', 'of', 'gotcha']}))
self.assertFalse(modulestore()._value_matches('gotcha', {'$in': ['a', 'bunch', 'of', 'gotchas']}))
self.assertFalse(modulestore()._value_matches('gotcha', {'$nin': ['a', 'bunch', 'of', 'gotcha']}))
self.assertTrue(modulestore()._value_matches('gotcha', {'$nin': ['a', 'bunch', 'of', 'gotchas']}))
self.assertTrue(modulestore()._block_matches({'group_access': {'1': [1]}}, {'group_access': {'$exists': True}}))
self.assertTrue(modulestore()._block_matches({'a': 1, 'b': 2}, {'group_access': {'$exists': False}}))
self.assertTrue(modulestore()._block_matches(
{'a': 1, 'group_access': {'1': [1]}},
{'a': 1, 'group_access': {'$exists': True}}))
self.assertFalse(modulestore()._block_matches(
{'a': 1, 'group_access': {'1': [1]}},
{'a': 111, 'group_access': {'$exists': True}}))
self.assertTrue(modulestore()._block_matches({'a': 1, 'b': 2}, {'a': 1, 'group_access': {'$exists': False}}))
self.assertFalse(modulestore()._block_matches({'a': 1, 'b': 2}, {'a': 9, 'group_access': {'$exists': False}}))
self.assertTrue(modulestore()._block_matches({'a': 1, 'b': 2}, {'a': 1}))
self.assertFalse(modulestore()._block_matches({'a': 1, 'b': 2}, {'a': 2}))
self.assertFalse(modulestore()._block_matches({'a': 1, 'b': 2}, {'c': 1}))
self.assertFalse(modulestore()._block_matches({'a': 1, 'b': 2}, {'a': 1, 'c': 1}))
self.assertTrue(modulestore()._block_matches({'a': 1, 'b': 2}, {'a': lambda i: 0 < i < 2}))
def test_get_items(self):
'''
get_items(locator, qualifiers, [branch])
'''
locator = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT)
# get all modules
matches = modulestore().get_items(locator)
self.assertEqual(len(matches), 7)
matches = modulestore().get_items(locator)
self.assertEqual(len(matches), 7)
matches = modulestore().get_items(locator, qualifiers={'category': 'chapter'})
self.assertEqual(len(matches), 3)
matches = modulestore().get_items(locator, qualifiers={'category': 'garbage'})
self.assertEqual(len(matches), 0)
matches = modulestore().get_items(
locator,
qualifiers={'category': 'chapter'},
settings={'display_name': re.compile(r'Hera')},
)
self.assertEqual(len(matches), 2)
matches = modulestore().get_items(locator, settings={'group_access': {'$exists': True}})
self.assertEqual(len(matches), 1)
matches = modulestore().get_items(locator, settings={'group_access': {'$exists': False}})
self.assertEqual(len(matches), 6)
def test_get_parents(self):
'''
get_parent_location(locator): BlockUsageLocator
'''
locator = BlockUsageLocator(
CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT),
'chapter', block_id='chapter1'
)
parent = modulestore().get_parent_location(locator)
self.assertIsNotNone(parent)
self.assertEqual(parent.block_id, 'head12345')
self.assertEqual(parent.org, "testx")
self.assertEqual(parent.course, "GreekHero")
locator = locator.course_key.make_usage_key('chapter', 'chapter2')
parent = modulestore().get_parent_location(locator)
self.assertIsNotNone(parent)
self.assertEqual(parent.block_id, 'head12345')
locator = locator.course_key.make_usage_key('garbage', 'nosuchblock')
parent = modulestore().get_parent_location(locator)
self.assertIsNone(parent)
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_get_children(self, _from_json):
"""
Test the existing get_children method on xdescriptors
"""
locator = BlockUsageLocator(
CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT), 'course', 'head12345'
)
block = modulestore().get_item(locator)
children = block.get_children()
expected_ids = [
"chapter1", "chapter2", "chapter3"
]
for child in children:
self.assertEqual(child.category, "chapter")
self.assertIn(child.location.block_id, expected_ids)
expected_ids.remove(child.location.block_id)
self.assertEqual(len(expected_ids), 0)
def version_agnostic(children):
"""
children: list of descriptors
Returns the `children` list with each member version-agnostic
"""
return [child.version_agnostic() for child in children]
class TestItemCrud(SplitModuleTest):
"""
Test create update and delete of items
"""
# DHM do I need to test this case which I believe won't work:
# 1) fetch a course and some of its blocks
# 2) do a series of CRUD operations on those previously fetched elements
# The problem here will be that the version_guid of the items will be the version at time of fetch.
# Each separate save will change the head version; so, the 2nd piecemeal change will flag the version
# conflict. That is, if versions are v0..vn and start as v0 in initial fetch, the first CRUD op will
# say it's changing an object from v0, splitMongo will process it and make the current head v1, the next
# crud op will pass in its v0 element and splitMongo will flag the version conflict.
# What I don't know is how realistic this test is and whether to wrap the modulestore with a higher level
# transactional operation which manages the version change or make the threading cache reason out whether or
# not the changes are independent and additive and thus non-conflicting.
# A use case I expect is
# (client) change this metadata
# (server) done, here's the new info which, btw, updates the course version to v1
# (client) add these children to this other node (which says it came from v0 or
# will the client have refreshed the version before doing the op?)
    # In this case, having a server-side transactional model won't help b/c the bug is a long transaction on the
    # client where it would be a mistake for the server to assume anything about client consistency. The best
# the server could do would be to see if the parent's children changed at all since v0.
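    # A minimal sketch of the stale-version scenario described above (illustrative only --
    # the locators and user id are hypothetical, and this is not an executed test):
    #
    #   course = modulestore().get_course(course_locator)      # course head is at v0
    #   block_a = modulestore().get_item(block_a_locator)      # fetched at v0
    #   block_b = modulestore().get_item(block_b_locator)      # fetched at v0
    #   modulestore().update_item(block_a, user_id)            # succeeds, head moves to v1
    #   modulestore().update_item(block_b, user_id)            # still carries v0 ->
    #                                                          # VersionConflictError expected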
def test_create_minimal_item(self):
"""
        create_item(user, location, category, definition_locator=None, fields): new_descriptor
"""
# grab link to course to ensure new versioning works
locator = CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT)
premod_course = modulestore().get_course(locator)
premod_history = modulestore().get_course_history_info(locator)
# add minimal one w/o a parent
category = 'sequential'
new_module = modulestore().create_item(
'user123', locator, category,
fields={'display_name': 'new sequential'}
)
# check that course version changed and course's previous is the other one
self.assertEqual(new_module.location.course, "GreekHero")
self.assertNotEqual(new_module.location.version_guid, premod_course.location.version_guid)
self.assertIsNone(locator.version_guid, "Version inadvertently filled in")
current_course = modulestore().get_course(locator)
self.assertEqual(new_module.location.version_guid, current_course.location.version_guid)
history_info = modulestore().get_course_history_info(current_course.location.course_key)
self.assertEqual(history_info['previous_version'], premod_course.location.version_guid)
self.assertEqual(history_info['original_version'], premod_history['original_version'])
self.assertEqual(history_info['edited_by'], "user123")
# check block's info: category, definition_locator, and display_name
self.assertEqual(new_module.category, 'sequential')
self.assertIsNotNone(new_module.definition_locator)
self.assertEqual(new_module.display_name, 'new sequential')
# check that block does not exist in previous version
locator = new_module.location.map_into_course(
CourseLocator(version_guid=premod_course.location.version_guid)
)
with self.assertRaises(ItemNotFoundError):
modulestore().get_item(locator)
def test_create_parented_item(self):
"""
Test create_item w/ specifying the parent of the new item
"""
locator = BlockUsageLocator(
CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT),
'chapter', block_id='chapter2'
)
original = modulestore().get_item(locator)
locator = BlockUsageLocator(
CourseLocator(org='testx', course='wonderful', run="run", branch=BRANCH_NAME_DRAFT), 'course', 'head23456'
)
premod_course = modulestore().get_course(locator.course_key)
category = 'chapter'
new_module = modulestore().create_child(
'user123', locator, category,
fields={'display_name': 'new chapter'},
definition_locator=original.definition_locator
)
# check that course version changed and course's previous is the other one
self.assertNotEqual(new_module.location.version_guid, premod_course.location.version_guid)
parent = modulestore().get_item(locator)
self.assertIn(new_module.location.version_agnostic(), version_agnostic(parent.children))
self.assertEqual(new_module.definition_locator.definition_id, original.definition_locator.definition_id)
def test_unique_naming(self):
"""
        Check that 2 modules of the same type get unique block_ids. Also check that if creation provides
        a definition id and new def data, it branches the definition in the db.
        Actually, this tries to test all create_item features not tested above.
"""
locator = BlockUsageLocator(
CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT),
'problem', block_id='problem1'
)
original = modulestore().get_item(locator)
locator = BlockUsageLocator(
CourseLocator(org='guestx', course='contender', run="run", branch=BRANCH_NAME_DRAFT), 'course', 'head345679'
)
category = 'problem'
new_payload = "<problem>empty</problem>"
new_module = modulestore().create_child(
'anotheruser', locator, category,
fields={'display_name': 'problem 1', 'data': new_payload},
)
another_payload = "<problem>not empty</problem>"
another_module = modulestore().create_child(
'anotheruser', locator, category,
fields={'display_name': 'problem 2', 'data': another_payload},
definition_locator=original.definition_locator,
)
# check that course version changed and course's previous is the other one
parent = modulestore().get_item(locator)
self.assertNotEqual(new_module.location.block_id, another_module.location.block_id)
self.assertIn(new_module.location.version_agnostic(), version_agnostic(parent.children))
self.assertIn(another_module.location.version_agnostic(), version_agnostic(parent.children))
self.assertEqual(new_module.data, new_payload)
self.assertEqual(another_module.data, another_payload)
# check definition histories
new_history = modulestore().get_definition_history_info(new_module.definition_locator)
self.assertIsNone(new_history['previous_version'])
self.assertEqual(new_history['original_version'], new_module.definition_locator.definition_id)
self.assertEqual(new_history['edited_by'], "anotheruser")
another_history = modulestore().get_definition_history_info(another_module.definition_locator)
self.assertEqual(another_history['previous_version'], original.definition_locator.definition_id)
def test_encoded_naming(self):
"""
        Check that using odd characters in a block id doesn't break the ability to add and retrieve the block.
"""
course_key = CourseLocator(org='guestx', course='contender', run="run", branch=BRANCH_NAME_DRAFT)
parent_locator = BlockUsageLocator(course_key, 'course', block_id="head345679")
chapter_locator = BlockUsageLocator(course_key, 'chapter', block_id="foo.bar_-~:0")
modulestore().create_child(
'anotheruser', parent_locator, 'chapter',
block_id=chapter_locator.block_id,
fields={'display_name': 'chapter 99'},
)
# check that course version changed and course's previous is the other one
new_module = modulestore().get_item(chapter_locator)
self.assertEqual(new_module.location.block_id, "foo.bar_-~:0") # hardcode to ensure BUL init didn't change
# now try making that a parent of something
new_payload = "<problem>empty</problem>"
problem_locator = BlockUsageLocator(course_key, 'problem', block_id="prob.bar_-~:99a")
modulestore().create_child(
'anotheruser', chapter_locator, 'problem',
block_id=problem_locator.block_id,
fields={'display_name': 'chapter 99', 'data': new_payload},
)
# check that course version changed and course's previous is the other one
new_module = modulestore().get_item(problem_locator)
self.assertEqual(new_module.location.block_id, problem_locator.block_id)
chapter = modulestore().get_item(chapter_locator)
self.assertIn(problem_locator, version_agnostic(chapter.children))
def test_create_bulk_operations(self):
"""
Test create_item using bulk_operations
"""
# start transaction w/ simple creation
user = random.getrandbits(32)
course_key = CourseLocator('test_org', 'test_transaction', 'test_run')
with modulestore().bulk_operations(course_key):
new_course = modulestore().create_course('test_org', 'test_transaction', 'test_run', user, BRANCH_NAME_DRAFT)
new_course_locator = new_course.id
index_history_info = modulestore().get_course_history_info(new_course.location.course_key)
course_block_prev_version = new_course.previous_version
course_block_update_version = new_course.update_version
self.assertIsNotNone(new_course_locator.version_guid, "Want to test a definite version")
versionless_course_locator = new_course_locator.version_agnostic()
# positive simple case: no force, add chapter
new_ele = modulestore().create_child(
user, new_course.location, 'chapter',
fields={'display_name': 'chapter 1'},
)
# version info shouldn't change
self.assertEqual(new_ele.update_version, course_block_update_version)
self.assertEqual(new_ele.update_version, new_ele.location.version_guid)
refetch_course = modulestore().get_course(versionless_course_locator)
self.assertEqual(refetch_course.location.version_guid, new_course.location.version_guid)
self.assertEqual(refetch_course.previous_version, course_block_prev_version)
self.assertEqual(refetch_course.update_version, course_block_update_version)
refetch_index_history_info = modulestore().get_course_history_info(refetch_course.location.course_key)
self.assertEqual(refetch_index_history_info, index_history_info)
self.assertIn(new_ele.location.version_agnostic(), version_agnostic(refetch_course.children))
# try to create existing item
with self.assertRaises(DuplicateItemError):
_fail = modulestore().create_child(
user, new_course.location, 'chapter',
block_id=new_ele.location.block_id,
fields={'display_name': 'chapter 2'},
)
# start a new transaction
with modulestore().bulk_operations(course_key):
new_ele = modulestore().create_child(
user, new_course.location, 'chapter',
fields={'display_name': 'chapter 2'},
)
transaction_guid = new_ele.location.version_guid
# ensure force w/ continue gives exception
with self.assertRaises(VersionConflictError):
_fail = modulestore().create_child(
user, new_course.location, 'chapter',
fields={'display_name': 'chapter 2'},
force=True
)
# ensure trying to continue the old one gives exception
with self.assertRaises(VersionConflictError):
_fail = modulestore().create_child(
user, new_course.location, 'chapter',
fields={'display_name': 'chapter 3'},
)
# add new child to old parent in continued (leave off version_guid)
course_module_locator = new_course.location.version_agnostic()
new_ele = modulestore().create_child(
user, course_module_locator, 'chapter',
fields={'display_name': 'chapter 4'},
)
self.assertNotEqual(new_ele.update_version, course_block_update_version)
self.assertEqual(new_ele.location.version_guid, transaction_guid)
# check children, previous_version
refetch_course = modulestore().get_course(versionless_course_locator)
self.assertIn(new_ele.location.version_agnostic(), version_agnostic(refetch_course.children))
self.assertEqual(refetch_course.previous_version, course_block_update_version)
self.assertEqual(refetch_course.update_version, transaction_guid)
def test_bulk_ops_org_filtering(self):
"""
Make sure of proper filtering when using bulk operations and
calling get_courses with an 'org' filter
"""
# start transaction w/ simple creation
user = random.getrandbits(32)
course_key = CourseLocator('test_org', 'test_transaction', 'test_run')
with modulestore().bulk_operations(course_key):
modulestore().create_course('test_org', 'test_transaction', 'test_run', user, BRANCH_NAME_DRAFT)
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT, org='test_org')
self.assertEqual(len(courses), 1)
self.assertEqual(courses[0].id.org, course_key.org)
self.assertEqual(courses[0].id.course, course_key.course)
self.assertEqual(courses[0].id.run, course_key.run)
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT, org='other_org')
self.assertEqual(len(courses), 0)
# re-assert after the end of the with scope
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT, org='test_org')
self.assertEqual(len(courses), 1)
self.assertEqual(courses[0].id.org, course_key.org)
self.assertEqual(courses[0].id.course, course_key.course)
self.assertEqual(courses[0].id.run, course_key.run)
courses = modulestore().get_courses(branch=BRANCH_NAME_DRAFT, org='other_org')
self.assertEqual(len(courses), 0)
def test_update_metadata(self):
"""
        test updating an item's metadata, ensuring the definition doesn't version but the course does if it should
"""
locator = BlockUsageLocator(
CourseLocator(org="testx", course="GreekHero", run="run", branch=BRANCH_NAME_DRAFT),
'problem', block_id="problem3_2"
)
problem = modulestore().get_item(locator)
pre_def_id = problem.definition_locator.definition_id
pre_version_guid = problem.location.version_guid
self.assertIsNotNone(pre_def_id)
self.assertIsNotNone(pre_version_guid)
self.assertNotEqual(problem.max_attempts, 4, "Invalidates rest of test")
problem.max_attempts = 4
problem.save() # decache above setting into the kvs
updated_problem = modulestore().update_item(problem, self.user_id)
# check that course version changed and course's previous is the other one
self.assertEqual(updated_problem.definition_locator.definition_id, pre_def_id)
self.assertNotEqual(updated_problem.location.version_guid, pre_version_guid)
self.assertEqual(updated_problem.max_attempts, 4)
# refetch to ensure original didn't change
original_location = problem.location.map_into_course(CourseLocator(version_guid=pre_version_guid))
problem = modulestore().get_item(original_location)
self.assertNotEqual(problem.max_attempts, 4, "original changed")
current_course = modulestore().get_course(locator.course_key)
self.assertEqual(updated_problem.location.version_guid, current_course.location.version_guid)
history_info = modulestore().get_course_history_info(current_course.location.course_key)
self.assertEqual(history_info['previous_version'], pre_version_guid)
self.assertEqual(history_info['edited_by'], self.user_id)
def test_update_children(self):
"""
test updating an item's children ensuring the definition doesn't version but the course does if it should
"""
locator = BlockUsageLocator(
CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT), 'chapter', 'chapter3'
)
block = modulestore().get_item(locator)
pre_def_id = block.definition_locator.definition_id
pre_version_guid = block.location.version_guid
# reorder children
self.assertGreater(len(block.children), 0, "meaningless test")
moved_child = block.children.pop()
block.save() # decache model changes
updated_problem = modulestore().update_item(block, self.user_id)
# check that course version changed and course's previous is the other one
self.assertEqual(updated_problem.definition_locator.definition_id, pre_def_id)
self.assertNotEqual(updated_problem.location.version_guid, pre_version_guid)
self.assertEqual(version_agnostic(updated_problem.children), version_agnostic(block.children))
self.assertNotIn(moved_child, version_agnostic(updated_problem.children))
locator = locator.course_key.make_usage_key('chapter', "chapter1")
other_block = modulestore().get_item(locator)
other_block.children.append(moved_child)
other_updated = modulestore().update_item(other_block, self.user_id)
self.assertIn(moved_child.version_agnostic(), version_agnostic(other_updated.children))
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
def test_update_definition(self, _from_json):
"""
test updating an item's definition: ensure it gets versioned as well as the course getting versioned
"""
locator = BlockUsageLocator(
CourseLocator(org='testx', course='GreekHero', run="run", branch=BRANCH_NAME_DRAFT), 'course', 'head12345'
)
block = modulestore().get_item(locator)
pre_def_id = block.definition_locator.definition_id
pre_version_guid = block.location.version_guid
block.grading_policy['GRADER'][0]['min_count'] = 13
block.save() # decache model changes
updated_block = modulestore().update_item(block, self.user_id)
self.assertNotEqual(updated_block.definition_locator.definition_id, pre_def_id)
self.assertNotEqual(updated_block.location.version_guid, pre_version_guid)
self.assertEqual(updated_block.grading_policy['GRADER'][0]['min_count'], 13)
def test_update_manifold(self):
"""
Test updating metadata, children, and definition in a single call ensuring all the versioning occurs
"""
locator = BlockUsageLocator(
CourseLocator('testx', 'GreekHero', 'run', branch=BRANCH_NAME_DRAFT),
'problem', block_id='problem1'
)
original = modulestore().get_item(locator)
# first add 2 children to the course for the update to manipulate
locator = BlockUsageLocator(
CourseLocator('guestx', 'contender', 'run', branch=BRANCH_NAME_DRAFT),
'course', block_id="head345679"
)
category = 'problem'
new_payload = "<problem>empty</problem>"
modulestore().create_child(
'test_update_manifold', locator, category,
fields={'display_name': 'problem 1', 'data': new_payload},
)
another_payload = "<problem>not empty</problem>"
modulestore().create_child(
'test_update_manifold', locator, category,
fields={'display_name': 'problem 2', 'data': another_payload},
definition_locator=original.definition_locator,
)
# pylint: disable=protected-access
modulestore()._clear_cache()
# now begin the test
block = modulestore().get_item(locator)
pre_def_id = block.definition_locator.definition_id
pre_version_guid = block.location.version_guid
self.assertNotEqual(block.grading_policy['GRADER'][0]['min_count'], 13)
block.grading_policy['GRADER'][0]['min_count'] = 13
block.children = block.children[1:] + [block.children[0]]
block.advertised_start = "Soon"
block.save() # decache model changes
updated_block = modulestore().update_item(block, self.user_id)
self.assertNotEqual(updated_block.definition_locator.definition_id, pre_def_id)
self.assertNotEqual(updated_block.location.version_guid, pre_version_guid)
self.assertEqual(updated_block.grading_policy['GRADER'][0]['min_count'], 13)
self.assertEqual(updated_block.children[0].version_agnostic(), block.children[0].version_agnostic())
self.assertEqual(updated_block.advertised_start, "Soon")
def test_delete_item(self):
course = self.create_course_for_deletion()
with self.assertRaises(ValueError):
modulestore().delete_item(course.location, self.user_id)
reusable_location = course.id.version_agnostic().for_branch(BRANCH_NAME_DRAFT)
# delete a leaf
problems = modulestore().get_items(reusable_location, qualifiers={'category': 'problem'})
locn_to_del = problems[0].location
new_course_loc = modulestore().delete_item(locn_to_del, self.user_id)
deleted = locn_to_del.version_agnostic()
self.assertFalse(modulestore().has_item(deleted))
with self.assertRaises(VersionConflictError):
modulestore().has_item(locn_to_del)
with self.assertRaises(ValueError):
modulestore().delete_item(deleted, self.user_id)
self.assertTrue(modulestore().has_item(locn_to_del.course_agnostic()))
self.assertNotEqual(new_course_loc.version_guid, course.location.version_guid)
# delete a subtree
nodes = modulestore().get_items(reusable_location, qualifiers={'category': 'chapter'})
new_course_loc = modulestore().delete_item(nodes[0].location, self.user_id)
# check subtree
def check_subtree(node):
"""
Check contents of subtree recursively
"""
if node:
node_loc = node.location
self.assertFalse(
modulestore().has_item(node_loc.version_agnostic())
)
self.assertTrue(modulestore().has_item(node_loc.course_agnostic()))
if node.has_children:
for sub in node.get_children():
check_subtree(sub)
check_subtree(nodes[0])
def create_course_for_deletion(self):
"""
Create a course we can delete
"""
course = modulestore().create_course('nihilx', 'deletion', 'run', 'deleting_user', BRANCH_NAME_DRAFT)
root = course.location.version_agnostic().for_branch(BRANCH_NAME_DRAFT)
for _ in range(4):
self.create_subtree_for_deletion(root, ['chapter', 'vertical', 'problem'])
return modulestore().get_item(root)
def create_subtree_for_deletion(self, parent, category_queue):
"""
        Create a subtree in the to-be-deleted course
"""
if not category_queue:
return
        node = modulestore().create_child(
            'deleting_user', parent.version_agnostic(), category_queue[0]
        )

# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import re
import time
import types
import openerp
import openerp.modules.registry
from openerp import SUPERUSER_ID
from openerp import netsvc, pooler, tools
from openerp.osv import fields,osv
from openerp.osv.orm import Model
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools import config
from openerp.tools.translate import _
from openerp.osv.orm import except_orm, browse_record
_logger = logging.getLogger(__name__)
MODULE_UNINSTALL_FLAG = '_force_unlink'
def _get_fields_type(self, cr, uid, context=None):
# Avoid too many nested `if`s below, as RedHat's Python 2.6
    # breaks on it. See bug 939653.
return sorted([(k,k) for k,v in fields.__dict__.iteritems()
if type(v) == types.TypeType and \
issubclass(v, fields._column) and \
v != fields._column and \
not v._deprecated and \
not issubclass(v, fields.function)])
def _in_modules(self, cr, uid, ids, field_name, arg, context=None):
#pseudo-method used by fields.function in ir.model/ir.model.fields
module_pool = self.pool.get("ir.module.module")
installed_module_ids = module_pool.search(cr, uid, [('state','=','installed')])
installed_module_names = module_pool.read(cr, uid, installed_module_ids, ['name'], context=context)
installed_modules = set(x['name'] for x in installed_module_names)
result = {}
xml_ids = osv.osv._get_xml_ids(self, cr, uid, ids)
for k,v in xml_ids.iteritems():
result[k] = ', '.join(sorted(installed_modules & set(xml_id.split('.')[0] for xml_id in v)))
return result
class ir_model(osv.osv):
_name = 'ir.model'
_description = "Models"
_order = 'model'
def _is_osv_memory(self, cr, uid, ids, field_name, arg, context=None):
models = self.browse(cr, uid, ids, context=context)
res = dict.fromkeys(ids)
for model in models:
if self.pool.get(model.model):
res[model.id] = self.pool.get(model.model).is_transient()
else:
_logger.error('Missing model %s' % (model.model, ))
return res
def _search_osv_memory(self, cr, uid, model, name, domain, context=None):
if not domain:
return []
__, operator, value = domain[0]
if operator not in ['=', '!=']:
            raise osv.except_osv(_('Invalid search criterion'), _('The osv_memory field can only be compared with the = and != operators.'))
value = bool(value) if operator == '=' else not bool(value)
all_model_ids = self.search(cr, uid, [], context=context)
is_osv_mem = self._is_osv_memory(cr, uid, all_model_ids, 'osv_memory', arg=None, context=context)
return [('id', 'in', [id for id in is_osv_mem if bool(is_osv_mem[id]) == value])]
def _view_ids(self, cr, uid, ids, field_name, arg, context=None):
models = self.browse(cr, uid, ids)
res = {}
for model in models:
res[model.id] = self.pool.get("ir.ui.view").search(cr, uid, [('model', '=', model.model)])
return res
_columns = {
'name': fields.char('Model Description', size=64, translate=True, required=True),
'model': fields.char('Model', size=64, required=True, select=1),
'info': fields.text('Information'),
'field_id': fields.one2many('ir.model.fields', 'model_id', 'Fields', required=True),
'state': fields.selection([('manual','Custom Object'),('base','Base Object')],'Type',readonly=True),
'access_ids': fields.one2many('ir.model.access', 'model_id', 'Access'),
'osv_memory': fields.function(_is_osv_memory, string='Transient Model', type='boolean',
fnct_search=_search_osv_memory,
help="This field specifies whether the model is transient or not (i.e. if records are automatically deleted from the database or not)"),
'modules': fields.function(_in_modules, type='char', size=128, string='In Modules', help='List of modules in which the object is defined or inherited'),
'view_ids': fields.function(_view_ids, type='one2many', obj='ir.ui.view', string='Views'),
}
_defaults = {
'model': 'x_',
'state': lambda self,cr,uid,ctx=None: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
}
def _check_model_name(self, cr, uid, ids, context=None):
for model in self.browse(cr, uid, ids, context=context):
if model.state=='manual':
if not model.model.startswith('x_'):
return False
if not re.match('^[a-z_A-Z0-9.]+$',model.model):
return False
return True
def _model_name_msg(self, cr, uid, ids, context=None):
return _('The Object name must start with x_ and not contain any special character !')
_constraints = [
(_check_model_name, _model_name_msg, ['model']),
]
_sql_constraints = [
('obj_name_uniq', 'unique (model)', 'Each model must be unique!'),
]
# overridden to allow searching both on model name (model field)
# and model description (name field)
def _name_search(self, cr, uid, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
if args is None:
args = []
domain = args + ['|', ('model', operator, name), ('name', operator, name)]
return self.name_get(cr, name_get_uid or uid,
super(ir_model, self).search(cr, uid, domain, limit=limit, context=context),
context=context)
def _drop_table(self, cr, uid, ids, context=None):
for model in self.browse(cr, uid, ids, context):
model_pool = self.pool.get(model.model)
cr.execute('select relkind from pg_class where relname=%s', (model_pool._table,))
result = cr.fetchone()
if result and result[0] == 'v':
cr.execute('DROP view %s' % (model_pool._table,))
elif result and result[0] == 'r':
cr.execute('DROP TABLE %s' % (model_pool._table,))
return True
def unlink(self, cr, user, ids, context=None):
# Prevent manual deletion of module tables
if context is None: context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get(MODULE_UNINSTALL_FLAG):
for model in self.browse(cr, user, ids, context):
if model.state != 'manual':
raise except_orm(_('Error'), _("Model '%s' contains module data and cannot be removed!") % (model.name,))
self._drop_table(cr, user, ids, context)
res = super(ir_model, self).unlink(cr, user, ids, context)
if not context.get(MODULE_UNINSTALL_FLAG):
# only reload pool for normal unlink. For module uninstall the
# reload is done independently in openerp.modules.loading
cr.commit() # must be committed before reloading registry in new cursor
pooler.restart_pool(cr.dbname)
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res
def write(self, cr, user, ids, vals, context=None):
if context:
context.pop('__last_update', None)
# Filter out operations 4 link from field id, because openerp-web
# always write (4,id,False) even for non dirty items
if 'field_id' in vals:
vals['field_id'] = [op for op in vals['field_id'] if op[0] != 4]
return super(ir_model,self).write(cr, user, ids, vals, context)
def create(self, cr, user, vals, context=None):
if context is None:
context = {}
if context and context.get('manual'):
vals['state']='manual'
res = super(ir_model,self).create(cr, user, vals, context)
if vals.get('state','base')=='manual':
self.instanciate(cr, user, vals['model'], context)
ctx = dict(context,
field_name=vals['name'],
field_state='manual',
select=vals.get('select_level', '0'))
self.pool.get(vals['model'])._auto_init(cr, ctx)
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res
def instanciate(self, cr, user, model, context=None):
class x_custom_model(osv.osv):
_custom = True
x_custom_model._name = model
x_custom_model._module = False
a = x_custom_model.create_instance(self.pool, cr)
if not a._columns:
x_name = 'id'
elif 'x_name' in a._columns.keys():
x_name = 'x_name'
else:
x_name = a._columns.keys()[0]
x_custom_model._rec_name = x_name
a._rec_name = x_name
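    # A hedged illustration of how a manual model reaches instanciate() (values are
    # hypothetical; the flow is create() -> instanciate() -> _auto_init as above):
    #
    #   ir_model_pool = self.pool.get('ir.model')
    #   ir_model_pool.create(cr, uid, {
    #       'name': 'My Custom Model',
    #       'model': 'x_my_model',        # must start with x_ (see _check_model_name)
    #   }, context={'manual': True})      # the 'manual' context marks it as a Custom Object
    #
    # after which self.pool.get('x_my_model') returns the dynamically built model,
    # with _rec_name falling back to 'id' until an x_name column is defined.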
class ir_model_fields(osv.osv):
_name = 'ir.model.fields'
_description = "Fields"
_columns = {
'name': fields.char('Name', required=True, size=64, select=1),
'model': fields.char('Object Name', size=64, required=True, select=1,
help="The technical name of the model this field belongs to"),
'relation': fields.char('Object Relation', size=64,
help="For relationship fields, the technical name of the target model"),
'relation_field': fields.char('Relation Field', size=64,
help="For one2many fields, the field on the target model that implement the opposite many2one relationship"),
'model_id': fields.many2one('ir.model', 'Model', required=True, select=True, ondelete='cascade',
help="The model this field belongs to"),
'field_description': fields.char('Field Label', required=True, size=256),
'ttype': fields.selection(_get_fields_type, 'Field Type',size=64, required=True),
'selection': fields.char('Selection Options',size=128, help="List of options for a selection field, "
"specified as a Python expression defining a list of (key, label) pairs. "
"For example: [('blue','Blue'),('yellow','Yellow')]"),
'required': fields.boolean('Required'),
'readonly': fields.boolean('Readonly'),
'select_level': fields.selection([('0','Not Searchable'),('1','Always Searchable'),('2','Advanced Search (deprecated)')],'Searchable', required=True),
'translate': fields.boolean('Translatable', help="Whether values for this field can be translated (enables the translation mechanism for that field)"),
'size': fields.integer('Size'),
'state': fields.selection([('manual','Custom Field'),('base','Base Field')],'Type', required=True, readonly=True, select=1),
'on_delete': fields.selection([('cascade','Cascade'),('set null','Set NULL')], 'On Delete', help='On delete property for many2one fields'),
'domain': fields.char('Domain', size=256, help="The optional domain to restrict possible values for relationship fields, "
"specified as a Python expression defining a list of triplets. "
"For example: [('color','=','red')]"),
'groups': fields.many2many('res.groups', 'ir_model_fields_group_rel', 'field_id', 'group_id', 'Groups'),
'view_load': fields.boolean('View Auto-Load'),
'selectable': fields.boolean('Selectable'),
'modules': fields.function(_in_modules, type='char', size=128, string='In Modules', help='List of modules in which the field is defined'),
'serialization_field_id': fields.many2one('ir.model.fields', 'Serialization Field', domain = "[('ttype','=','serialized')]",
ondelete='cascade', help="If set, this field will be stored in the sparse "
"structure of the serialization field, instead "
"of having its own database column. This cannot be "
"changed after creation."),
}
_rec_name='field_description'
_defaults = {
'view_load': 0,
'selection': "",
'domain': "[]",
'name': 'x_',
'state': lambda self,cr,uid,ctx=None: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
'on_delete': 'set null',
'select_level': '0',
'size': 64,
'field_description': '',
'selectable': 1,
}
_order = "name"
def _check_selection(self, cr, uid, selection, context=None):
try:
selection_list = eval(selection)
except Exception:
_logger.warning('Invalid selection list definition for fields.selection', exc_info=True)
raise except_orm(_('Error'),
_("The Selection Options expression is not a valid Pythonic expression."
"Please provide an expression in the [('key','Label'), ...] format."))
check = True
if not (isinstance(selection_list, list) and selection_list):
check = False
else:
for item in selection_list:
if not (isinstance(item, (tuple,list)) and len(item) == 2):
check = False
break
if not check:
            raise except_orm(_('Error'),
                _("The Selection Options expression must be in the [('key','Label'), ...] format!"))
return True
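    # For illustration, the format _check_selection enforces (these calls are only a
    # sketch and assume a valid cursor/uid; the first value mirrors the field help text):
    #
    #   self._check_selection(cr, uid, "[('blue','Blue'),('yellow','Yellow')]")  # passes
    #   self._check_selection(cr, uid, "['blue', 'yellow']")   # raises: items must be 2-tuples
    #   self._check_selection(cr, uid, "not a list")           # raises: not a valid expression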
def _size_gt_zero_msg(self, cr, user, ids, context=None):
return _('Size of the field can never be less than 1 !')
_sql_constraints = [
('size_gt_zero', 'CHECK (size>0)',_size_gt_zero_msg ),
]
def _drop_column(self, cr, uid, ids, context=None):
for field in self.browse(cr, uid, ids, context):
model = self.pool.get(field.model)
cr.execute('select relkind from pg_class where relname=%s', (model._table,))
result = cr.fetchone()
cr.execute("SELECT column_name FROM information_schema.columns WHERE table_name ='%s' and column_name='%s'" %(model._table, field.name))
column_name = cr.fetchone()
if column_name and (result and result[0] == 'r'):
cr.execute('ALTER table "%s" DROP column "%s" cascade' % (model._table, field.name))
model._columns.pop(field.name, None)
return True
def unlink(self, cr, user, ids, context=None):
# Prevent manual deletion of module columns
if context is None: context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get(MODULE_UNINSTALL_FLAG) and \
any(field.state != 'manual' for field in self.browse(cr, user, ids, context)):
raise except_orm(_('Error'), _("This column contains module data and cannot be removed!"))
self._drop_column(cr, user, ids, context)
res = super(ir_model_fields, self).unlink(cr, user, ids, context)
if not context.get(MODULE_UNINSTALL_FLAG):
cr.commit()
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res
def create(self, cr, user, vals, context=None):
if 'model_id' in vals:
model_data = self.pool.get('ir.model').browse(cr, user, vals['model_id'])
vals['model'] = model_data.model
if context is None:
context = {}
if context and context.get('manual',False):
vals['state'] = 'manual'
if vals.get('ttype', False) == 'selection':
if not vals.get('selection',False):
raise except_orm(_('Error'), _('For selection fields, the Selection Options must be given!'))
self._check_selection(cr, user, vals['selection'], context=context)
res = super(ir_model_fields,self).create(cr, user, vals, context)
if vals.get('state','base') == 'manual':
if not vals['name'].startswith('x_'):
raise except_orm(_('Error'), _("Custom fields must have a name that starts with 'x_' !"))
if vals.get('relation',False) and not self.pool.get('ir.model').search(cr, user, [('model','=',vals['relation'])]):
raise except_orm(_('Error'), _("Model %s does not exist!") % vals['relation'])
if self.pool.get(vals['model']):
self.pool.get(vals['model']).__init__(self.pool, cr)
#Added context to _auto_init for special treatment to custom field for select_level
ctx = dict(context,
field_name=vals['name'],
field_state='manual',
select=vals.get('select_level', '0'),
update_custom_fields=True)
self.pool.get(vals['model'])._auto_init(cr, ctx)
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res
def write(self, cr, user, ids, vals, context=None):
if context is None:
context = {}
if context and context.get('manual',False):
vals['state'] = 'manual'
#For the moment renaming a sparse field or changing the storing system is not allowed. This may be done later
if 'serialization_field_id' in vals or 'name' in vals:
for field in self.browse(cr, user, ids, context=context):
if 'serialization_field_id' in vals and field.serialization_field_id.id != vals['serialization_field_id']:
raise except_orm(_('Error!'), _('Changing the storing system for field "%s" is not allowed.')%field.name)
if field.serialization_field_id and (field.name != vals['name']):
raise except_orm(_('Error!'), _('Renaming sparse field "%s" is not allowed')%field.name)
column_rename = None # if set, *one* column can be renamed here
obj = None
models_patch = {} # structs of (obj, [(field, prop, change_to),..])
# data to be updated on the orm model
# static table of properties
model_props = [ # (our-name, fields.prop, set_fn)
('field_description', 'string', str),
('required', 'required', bool),
('readonly', 'readonly', bool),
('domain', '_domain', eval),
('size', 'size', int),
('on_delete', 'ondelete', str),
('translate', 'translate', bool),
('view_load', 'view_load', bool),
('selectable', 'selectable', bool),
('select_level', 'select', int),
('selection', 'selection', eval),
]
if vals and ids:
checked_selection = False # need only check it once, so defer
for item in self.browse(cr, user, ids, context=context):
if not (obj and obj._name == item.model):
obj = self.pool.get(item.model)
if item.state != 'manual':
raise except_orm(_('Error!'),
_('Properties of base fields cannot be altered in this manner! '
'Please modify them through Python code, '
'preferably through a custom addon!'))
if item.ttype == 'selection' and 'selection' in vals \
and not checked_selection:
self._check_selection(cr, user, vals['selection'], context=context)
checked_selection = True
final_name = item.name
if 'name' in vals and vals['name'] != item.name:
# We need to rename the column
if column_rename:
raise except_orm(_('Error!'), _('Can only rename one column at a time!'))
if vals['name'] in obj._columns:
raise except_orm(_('Error!'), _('Cannot rename column to %s, because that column already exists!') % vals['name'])
if vals.get('state', 'base') == 'manual' and not vals['name'].startswith('x_'):
raise except_orm(_('Error!'), _('New column name must still start with x_ , because it is a custom field!'))
if '\'' in vals['name'] or '"' in vals['name'] or ';' in vals['name']:
raise ValueError('Invalid character in column name')
column_rename = (obj, (obj._table, item.name, vals['name']))
final_name = vals['name']
if 'model_id' in vals and vals['model_id'] != item.model_id:
raise except_orm(_("Error!"), _("Changing the model of a field is forbidden!"))
if 'ttype' in vals and vals['ttype'] != item.ttype:
raise except_orm(_("Error!"), _("Changing the type of a column is not yet supported. "
"Please drop it and create it again!"))
# We don't check the 'state', because it might come from the context
# (thus be set for multiple fields) and will be ignored anyway.
if obj:
models_patch.setdefault(obj._name, (obj,[]))
# find out which properties (per model) we need to update
for field_name, field_property, set_fn in model_props:
if field_name in vals:
property_value = set_fn(vals[field_name])
if getattr(obj._columns[item.name], field_property) != property_value:
models_patch[obj._name][1].append((final_name, field_property, property_value))
# our dict is ready here, but no properties are changed so far
# These shall never be written (modified)
for column_name in ('model_id', 'model', 'state'):
if column_name in vals:
del vals[column_name]
res = super(ir_model_fields,self).write(cr, user, ids, vals, context=context)
if column_rename:
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % column_rename[1])
# This is VERY risky, but let us have this feature:
# we want to change the key of column in obj._columns dict
col = column_rename[0]._columns.pop(column_rename[1][1]) # take object out, w/o copy
column_rename[0]._columns[column_rename[1][2]] = col
if models_patch:
# We have to update _columns of the model(s) and then call their
# _auto_init to sync the db with the model. Hopefully, since write()
# was called earlier, they will be in-sync before the _auto_init.
# Anything we don't update in _columns now will be reset from
# the model into ir.model.fields (db).
ctx = dict(context, select=vals.get('select_level', '0'),
update_custom_fields=True)
for __, patch_struct in models_patch.items():
obj = patch_struct[0]
for col_name, col_prop, val in patch_struct[1]:
setattr(obj._columns[col_name], col_prop, val)
obj._auto_init(cr, ctx)
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res
class ir_model_constraint(Model):
"""
This model tracks PostgreSQL foreign keys and constraints used by OpenERP
models.
"""
_name = 'ir.model.constraint'
_columns = {
'name': fields.char('Constraint', required=True, size=128, select=1,
help="PostgreSQL constraint or foreign key name."),
'model': fields.many2one('ir.model', string='Model',
required=True, select=1),
'module': fields.many2one('ir.module.module', string='Module',
required=True, select=1),
'type': fields.char('Constraint Type', required=True, size=1, select=1,
help="Type of the constraint: `f` for a foreign key, "
"`u` for other constraints."),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Initialization Date')
}
_sql_constraints = [
('module_name_uniq', 'unique(name, module)',
'Constraints with the same name are unique per module.'),
]
def _module_data_uninstall(self, cr, uid, ids, context=None):
"""
Delete PostgreSQL foreign keys and constraints tracked by this model.
"""
if uid != SUPERUSER_ID and not self.pool.get('ir.model.access').check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
context = dict(context or {})
ids_set = set(ids)
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model.model
model_obj = self.pool.get(model)
name = openerp.tools.ustr(data.name)
typ = data.type
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_constraint where name=%s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
if set(external_ids)-ids_set:
# as installed modules have defined this element we must not delete it!
continue
if typ == 'f':
# test if FK exists on this table (it could be on a related m2m table, in which case we ignore it)
cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('f', name, model_obj._table))
if cr.fetchone():
cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
_logger.info('Dropped FK CONSTRAINT %s@%s', name, model)
if typ == 'u':
# test if constraint exists
cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('u', name, model_obj._table))
if cr.fetchone():
cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
_logger.info('Dropped CONSTRAINT %s@%s', name, model)
self.unlink(cr, uid, ids, context)
class ir_model_relation(Model):
"""
This model tracks PostgreSQL tables used to implement OpenERP many2many
relations.
"""
_name = 'ir.model.relation'
_columns = {
'name': fields.char('Relation Name', required=True, size=128, select=1,
help="PostgreSQL table name implementing a many2many relation."),
'model': fields.many2one('ir.model', string='Model',
required=True, select=1),
'module': fields.many2one('ir.module.module', string='Module',
required=True, select=1),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Initialization Date')
}
def _module_data_uninstall(self, cr, uid, ids, context=None):
"""
Delete PostgreSQL many2many relations tracked by this model.
"""
if uid != SUPERUSER_ID and not self.pool.get('ir.model.access').check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
ids_set = set(ids)
to_drop_table = []
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model
name = openerp.tools.ustr(data.name)
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_relation where name = %s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
if set(external_ids)-ids_set:
# as installed modules have defined this element we must not delete it!
continue
cr.execute("SELECT 1 FROM information_schema.tables WHERE table_name=%s", (name,))
if cr.fetchone() and not name in to_drop_table:
to_drop_table.append(name)
self.unlink(cr, uid, ids, context)
# drop m2m relation tables
for table in to_drop_table:
cr.execute('DROP TABLE %s CASCADE'% table,)
_logger.info('Dropped table %s', table)
cr.commit()
class ir_model_access(osv.osv):
_name = 'ir.model.access'
_columns = {
'name': fields.char('Name', size=64, required=True, select=True),
        'active': fields.boolean('Active', help='If you uncheck the active field, it will disable the ACL without deleting it (if you delete a native ACL, it will be re-created when you reload the module).'),
'model_id': fields.many2one('ir.model', 'Object', required=True, domain=[('osv_memory','=', False)], select=True, ondelete='cascade'),
'group_id': fields.many2one('res.groups', 'Group', ondelete='cascade', select=True),
'perm_read': fields.boolean('Read Access'),
'perm_write': fields.boolean('Write Access'),
'perm_create': fields.boolean('Create Access'),
'perm_unlink': fields.boolean('Delete Access'),
}
_defaults = {
'active': True,
}
def check_groups(self, cr, uid, group):
grouparr = group.split('.')
if not grouparr:
return False
cr.execute("select 1 from res_groups_users_rel where uid=%s and gid IN (select res_id from ir_model_data where module=%s and name=%s)", (uid, grouparr[0], grouparr[1],))
return bool(cr.fetchone())
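    # For example (illustrative): check_groups(cr, uid, 'base.group_system') returns True
    # only when `uid` belongs to the group whose external id is base.group_system.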
def check_group(self, cr, uid, model, mode, group_ids):
""" Check if a specific group has the access mode to the specified model"""
assert mode in ['read','write','create','unlink'], 'Invalid access mode'
if isinstance(model, browse_record):
assert model._table_name == 'ir.model', 'Invalid model object'
model_name = model.name
else:
model_name = model
if isinstance(group_ids, (int, long)):
group_ids = [group_ids]
for group_id in group_ids:
cr.execute("SELECT perm_" + mode + " "
" FROM ir_model_access a "
" JOIN ir_model m ON (m.id = a.model_id) "
" WHERE m.model = %s AND a.active IS True "
" AND a.group_id = %s", (model_name, group_id)
)
r = cr.fetchone()
if r is None:
cr.execute("SELECT perm_" + mode + " "
" FROM ir_model_access a "
" JOIN ir_model m ON (m.id = a.model_id) "
" WHERE m.model = %s AND a.active IS True "
" AND a.group_id IS NULL", (model_name, )
)
r = cr.fetchone()
access = bool(r and r[0])
if access:
return True
# pass no groups -> no access
return False
def group_names_with_access(self, cr, model_name, access_mode):
"""Returns the names of visible groups which have been granted ``access_mode`` on
the model ``model_name``.
:rtype: list
"""
assert access_mode in ['read','write','create','unlink'], 'Invalid access mode: %s' % access_mode
cr.execute('''SELECT
c.name, g.name
FROM
ir_model_access a
JOIN ir_model m ON (a.model_id=m.id)
JOIN res_groups g ON (a.group_id=g.id)
LEFT JOIN ir_module_category c ON (c.id=g.category_id)
WHERE
m.model=%s AND
a.active IS True AND
a.perm_''' + access_mode, (model_name,))
return [('%s/%s' % x) if x[0] else x[1] for x in cr.fetchall()]
@tools.ormcache()
def check(self, cr, uid, model, mode='read', raise_exception=True, context=None):
if uid==1:
            # User root has all access rights
# TODO: exclude xml-rpc requests
return True
assert mode in ['read','write','create','unlink'], 'Invalid access mode'
if isinstance(model, browse_record):
assert model._table_name == 'ir.model', 'Invalid model object'
model_name = model.model
else:
model_name = model
# TransientModel records have no access rights, only an implicit access rule
if not self.pool.get(model_name):
_logger.error('Missing model %s' % (model_name, ))
elif self.pool.get(model_name).is_transient():
return True
# We check if a specific rule exists
cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
' FROM ir_model_access a '
' JOIN ir_model m ON (m.id = a.model_id) '
' JOIN res_groups_users_rel gu ON (gu.gid = a.group_id) '
' WHERE m.model = %s '
' AND gu.uid = %s '
' AND a.active IS True '
, (model_name, uid,)
)
r = cr.fetchone()[0]
if r is None:
# there is no specific rule. We check the generic rule
cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
' FROM ir_model_access a '
' JOIN ir_model m ON (m.id = a.model_id) '
' WHERE a.group_id IS NULL '
' AND m.model = %s '
' AND a.active IS True '
, (model_name,)
)
r = cr.fetchone()[0]
if not r and raise_exception:
groups = '\n\t'.join('- %s' % g for g in self.group_names_with_access(cr, model_name, mode))
msg_heads = {
# Messages are declared in extenso so they are properly exported in translation terms
'read': _("Sorry, you are not allowed to access this document."),
'write': _("Sorry, you are not allowed to modify this document."),
'create': _("Sorry, you are not allowed to create this kind of document."),
'unlink': _("Sorry, you are not allowed to delete this document."),
}
if groups:
msg_tail = _("Only users with the following access level are currently allowed to do that") + ":\n%s\n\n(" + _("Document model") + ": %s)"
msg_params = (groups, model_name)
else:
msg_tail = _("Please contact your system administrator if you think this is an error.") + "\n\n(" + _("Document model") + ": %s)"
msg_params = (model_name,)
_logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s', mode, uid, model_name)
msg = '%s %s' % (msg_heads[mode], msg_tail)
raise except_orm(_('Access Denied'), msg % msg_params)
return r or False
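    # Minimal usage sketch for the ACL check above (not part of the original
    # module; 'res.partner' and the surrounding flow are assumptions used only
    # for illustration):
    #
    #   access_obj = self.pool.get('ir.model.access')
    #   if access_obj.check(cr, uid, 'res.partner', 'write', raise_exception=False):
    #       pass  # caller may proceed with the write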
__cache_clearing_methods = []
def register_cache_clearing_method(self, model, method):
self.__cache_clearing_methods.append((model, method))
def unregister_cache_clearing_method(self, model, method):
try:
i = self.__cache_clearing_methods.index((model, method))
del self.__cache_clearing_methods[i]
except ValueError:
pass
def call_cache_clearing_methods(self, cr):
self.check.clear_cache(self) # clear the cache of check function
for model, method in self.__cache_clearing_methods:
object_ = self.pool.get(model)
if object_:
getattr(object_, method)()
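    # Usage sketch for the cache-clearing hooks above (model/method names are
    # illustrative): another osv object can ask to be notified whenever access
    # rules change.
    #
    #   self.pool.get('ir.model.access').register_cache_clearing_method(
    #       'ir.ui.menu', 'clear_cache')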
#
# Check rights on actions
#
def write(self, cr, uid, *args, **argv):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).write(cr, uid, *args, **argv)
return res
def create(self, cr, uid, *args, **argv):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).create(cr, uid, *args, **argv)
return res
def unlink(self, cr, uid, *args, **argv):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).unlink(cr, uid, *args, **argv)
return res
class ir_model_data(osv.osv):
"""Holds external identifier keys for records in the database.
This has two main uses:
* allows easy data integration with third-party systems,
making import/export/sync of data possible, as records
can be uniquely identified across multiple systems
* allows tracking the origin of data installed by OpenERP
modules themselves, thus making it possible to later
update them seamlessly.
"""
_name = 'ir.model.data'
_order = 'module,model,name'
def _display_name_get(self, cr, uid, ids, prop, unknow_none, context=None):
result = {}
result2 = {}
for res in self.browse(cr, uid, ids, context=context):
if res.id:
result.setdefault(res.model, {})
result[res.model][res.res_id] = res.id
result2[res.id] = False
for model in result:
try:
r = dict(self.pool.get(model).name_get(cr, uid, result[model].keys(), context=context))
for key,val in result[model].items():
result2[val] = r.get(key, False)
except:
                # some objects have no valid name_get implemented; we accept this
pass
return result2
def _complete_name_get(self, cr, uid, ids, prop, unknow_none, context=None):
result = {}
for res in self.browse(cr, uid, ids, context=context):
result[res.id] = (res.module and (res.module + '.') or '')+res.name
return result
_columns = {
'name': fields.char('External Identifier', required=True, size=128, select=1,
help="External Key/Identifier that can be used for "
"data integration with third-party systems"),
'complete_name': fields.function(_complete_name_get, type='char', string='Complete ID'),
'display_name': fields.function(_display_name_get, type='char', string='Record Name'),
'model': fields.char('Model Name', required=True, size=64, select=1),
'module': fields.char('Module', required=True, size=64, select=1),
'res_id': fields.integer('Record ID', select=1,
help="ID of the target record in the database"),
'noupdate': fields.boolean('Non Updatable'),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Init Date')
}
_defaults = {
'date_init': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'date_update': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'noupdate': False,
'module': ''
}
_sql_constraints = [
('module_name_uniq', 'unique(name, module)', 'You cannot have multiple records with the same external ID in the same module!'),
]
def __init__(self, pool, cr):
osv.osv.__init__(self, pool, cr)
self.doinit = True
# also stored in pool to avoid being discarded along with this osv instance
if getattr(pool, 'model_data_reference_ids', None) is None:
self.pool.model_data_reference_ids = {}
self.loads = self.pool.model_data_reference_ids
def _auto_init(self, cr, context=None):
super(ir_model_data, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_model_data_module_name_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX ir_model_data_module_name_index ON ir_model_data (module, name)')
@tools.ormcache()
def _get_id(self, cr, uid, module, xml_id):
"""Returns the id of the ir.model.data record corresponding to a given module and xml_id (cached) or raise a ValueError if not found"""
ids = self.search(cr, uid, [('module','=',module), ('name','=', xml_id)])
if not ids:
raise ValueError('No such external ID currently defined in the system: %s.%s' % (module, xml_id))
        # the SQL constraints ensure that we get only one result
return ids[0]
@tools.ormcache()
def get_object_reference(self, cr, uid, module, xml_id):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached) or raise ValueError if not found"""
data_id = self._get_id(cr, uid, module, xml_id)
res = self.read(cr, uid, data_id, ['model', 'res_id'])
if not res['res_id']:
raise ValueError('No such external ID currently defined in the system: %s.%s' % (module, xml_id))
return res['model'], res['res_id']
def get_object(self, cr, uid, module, xml_id, context=None):
"""Returns a browsable record for the given module name and xml_id or raise ValueError if not found"""
res_model, res_id = self.get_object_reference(cr, uid, module, xml_id)
result = self.pool.get(res_model).browse(cr, uid, res_id, context=context)
if not result.exists():
raise ValueError('No record found for unique ID %s.%s. It may have been deleted.' % (module, xml_id))
return result
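    # Minimal usage sketch for the lookup helpers above (assumed calling context,
    # example identifiers only):
    #
    #   imd = self.pool.get('ir.model.data')
    #   model, res_id = imd.get_object_reference(cr, uid, 'base', 'user_root')
    #   record = imd.get_object(cr, uid, 'base', 'user_root')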
def _update_dummy(self,cr, uid, model, module, xml_id=False, store=True):
if not xml_id:
return False
try:
id = self.read(cr, uid, [self._get_id(cr, uid, module, xml_id)], ['res_id'])[0]['res_id']
self.loads[(module,xml_id)] = (model,id)
except:
id = False
return id
def clear_caches(self):
""" Clears all orm caches on the object's methods
:returns: itself
"""
self._get_id.clear_cache(self)
self.get_object_reference.clear_cache(self)
return self
def unlink(self, cr, uid, ids, context=None):
""" Regular unlink method, but make sure to clear the caches. """
self.clear_caches()
return super(ir_model_data,self).unlink(cr, uid, ids, context=context)
def _update(self,cr, uid, model, module, values, xml_id=False, store=True, noupdate=False, mode='init', res_id=False, context=None):
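        # Create or update a record on behalf of the data-loading machinery and
        # keep its ir.model.data entry in sync: a stale external ID (whose target
        # record no longer exists) is dropped, a missing one is created, existing
        # targets are written with `values`, and _inherits parents get companion
        # '<xml_id>_<table>' entries.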
model_obj = self.pool.get(model)
if not context:
context = {}
# records created during module install should not display the messages of OpenChatter
context = dict(context, install_mode=True)
if xml_id and ('.' in xml_id):
assert len(xml_id.split('.'))==2, _("'%s' contains too many dots. XML ids should not contain dots ! These are used to refer to other modules data, as in module.reference_id") % xml_id
module, xml_id = xml_id.split('.')
if (not xml_id) and (not self.doinit):
return False
action_id = False
if xml_id:
cr.execute('''SELECT imd.id, imd.res_id, md.id, imd.model
FROM ir_model_data imd LEFT JOIN %s md ON (imd.res_id = md.id)
WHERE imd.module=%%s AND imd.name=%%s''' % model_obj._table,
(module, xml_id))
results = cr.fetchall()
for imd_id2,res_id2,real_id2,real_model in results:
if not real_id2:
self._get_id.clear_cache(self, uid, module, xml_id)
self.get_object_reference.clear_cache(self, uid, module, xml_id)
cr.execute('delete from ir_model_data where id=%s', (imd_id2,))
res_id = False
else:
assert model == real_model, "External ID conflict, %s already refers to a `%s` record,"\
" you can't define a `%s` record with this ID." % (xml_id, real_model, model)
res_id,action_id = res_id2,imd_id2
if action_id and res_id:
model_obj.write(cr, uid, [res_id], values, context=context)
self.write(cr, uid, [action_id], {
'date_update': time.strftime('%Y-%m-%d %H:%M:%S'),
},context=context)
elif res_id:
model_obj.write(cr, uid, [res_id], values, context=context)
if xml_id:
self.create(cr, uid, {
'name': xml_id,
'model': model,
'module':module,
'res_id':res_id,
'noupdate': noupdate,
},context=context)
if model_obj._inherits:
for table in model_obj._inherits:
inherit_id = model_obj.browse(cr, uid,
res_id,context=context)[model_obj._inherits[table]]
self.create(cr, uid, {
'name': xml_id + '_' + table.replace('.', '_'),
'model': table,
'module': module,
'res_id': inherit_id.id,
'noupdate': noupdate,
},context=context)
else:
if mode=='init' or (mode=='update' and xml_id):
res_id = model_obj.create(cr, uid, values, context=context)
if xml_id:
self.create(cr, uid, {
'name': xml_id,
'model': model,
'module': module,
'res_id': res_id,
'noupdate': noupdate
},context=context)
if model_obj._inherits:
for table in model_obj._inherits:
inherit_id = model_obj.browse(cr, uid,
res_id,context=context)[model_obj._inherits[table]]
self.create(cr, uid, {
'name': xml_id + '_' + table.replace('.', '_'),
'model': table,
'module': module,
'res_id': inherit_id.id,
'noupdate': noupdate,
},context=context)
if xml_id and res_id:
self.loads[(module, xml_id)] = (model, res_id)
for table, inherit_field in model_obj._inherits.iteritems():
inherit_id = model_obj.read(cr, uid, res_id,
[inherit_field])[inherit_field]
self.loads[(module, xml_id + '_' + table.replace('.', '_'))] = (table, inherit_id)
return res_id
def ir_set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=None, xml_id=False):
if isinstance(models[0], (list, tuple)):
model,res_id = models[0]
else:
res_id=None
model = models[0]
if res_id:
where = ' and res_id=%s' % (res_id,)
else:
where = ' and (res_id is null)'
if key2:
where += ' and key2=\'%s\'' % (key2,)
else:
where += ' and (key2 is null)'
cr.execute('select * from ir_values where model=%s and key=%s and name=%s'+where,(model, key, name))
res = cr.fetchone()
if not res:
| ir_values_obj = pooler.get_pool(cr.dbname).get('ir.values') | 4,578 | lcc_e | python | null | d67076d9d7bf62b5c38c0440629a0ded22f13c821ac9e30b |
|
import os
import re
import json
import time
import subprocess
from decimal import Decimal
from urllib import urlopen, quote, unquote_plus
from django.conf import settings
from django.views import generic
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.contrib.auth.models import Group
from django.views.decorators.http import require_POST
from django import forms
from django.template import RequestContext
from django.core.context_processors import csrf
from django.core.mail import EmailMultiAlternatives
from django.core.exceptions import PermissionDenied
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render, render_to_response
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from cms.sortable import *
from creation.forms import *
from creation.models import *
from creation.subtitles import *
def humansize(nbytes):
suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
if nbytes == 0: return '0 B'
i = 0
while nbytes >= 1024 and i < len(suffixes)-1:
nbytes /= 1024.
i += 1
f = ('%.1f' % nbytes).rstrip('0').rstrip('.')
return '%s %s' % (f, suffixes[i])
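# Quick illustration of humansize (these are the values the function returns
# for the given inputs):
#   humansize(0)       -> '0 B'
#   humansize(1536)    -> '1.5 KB'
#   humansize(1048576) -> '1 MB'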
def get_page(resource, page, page_count = 20):
paginator = Paginator(resource, page_count)
try:
resource = paginator.page(page)
except PageNotAnInteger:
resource = paginator.page(1)
except EmptyPage:
resource = paginator.page(paginator.num_pages)
return resource
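# Sketch of how get_page is typically wired into a view (queryset and request
# names are assumptions for illustration):
#
#   qs = TutorialResource.objects.filter(status=1)
#   page_obj = get_page(qs, request.GET.get('page'))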
def is_contributor(user):
"""Check if the user is having contributor rights"""
if user.groups.filter(Q(name='Contributor')|Q(name='External-Contributor')).count():
return True
return False
def is_internal_contributor(user):
"""Check if the user is having contributor rights"""
if user.groups.filter(name='Contributor').count():
return True
return False
def is_external_contributor(user):
"""Check if the user is having external-contributor rights"""
if user.groups.filter(name='External-Contributor').count():
return True
return False
def is_videoreviewer(user):
"""Check if the user is having video reviewer rights"""
if user.groups.filter(name='Video-Reviewer').count() == 1:
return True
return False
def is_domainreviewer(user):
"""Check if the user is having domain reviewer rights"""
if user.groups.filter(name='Domain-Reviewer').count() == 1:
return True
return False
def is_qualityreviewer(user):
"""Check if the user is having quality reviewer rights"""
if user.groups.filter(name='Quality-Reviewer').count() == 1:
return True
return False
def is_administrator(user):
"""Check if the user is having administrator rights"""
if user.groups.filter(name='Administrator').count():
return True
return False
def get_filesize(path):
filesize_bytes = os.path.getsize(path)
return humansize(filesize_bytes)
# returns video meta info using ffmpeg
def get_video_info(path):
"""Uses ffmpeg to determine information about a video."""
info_m = {}
try:
process = subprocess.Popen(['/usr/bin/ffmpeg', '-i', path], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
duration_m = re.search(r"Duration:\s{1}(?P<hours>\d+?):(?P<minutes>\d+?):(?P<seconds>\d+\.\d+?)", stdout, re.DOTALL).groupdict()
info_m = re.search(r": Video: (?P<codec>.*?), (?P<profile>.*?), (?P<width>.*?)x(?P<height>.*?), ", stdout, re.DOTALL).groupdict()
hours = Decimal(duration_m['hours'])
minutes = Decimal(duration_m['minutes'])
seconds = Decimal(duration_m['seconds'])
total = 0
total += 60 * 60 * hours
total += 60 * minutes
total += seconds
info_m['hours'] = hours
info_m['minutes'] = minutes
info_m['seconds'] = seconds
tmp_seconds = str(int(seconds))
if seconds < 10:
tmp_seconds = "0" + tmp_seconds
info_m['duration'] = duration_m['hours'] + ':' + duration_m['minutes'] + ":" + tmp_seconds
info_m['total'] = int(total)
info_m['width'] = int(info_m['width'])
        # height may carry trailing aspect-ratio info such as '[PAR 1:1 DAR 3:2]'; keep only the numeric part
info_m['height'] = int(info_m['height'].split()[0])
info_m['size'] = get_filesize(path)
except:
info_m['codec'] = ''
info_m['profile'] = ''
info_m['hours'] = 0
info_m['minutes'] = 0
info_m['seconds'] = 0
info_m['duration'] = 0
info_m['total'] = 0
info_m['width'] = 0
info_m['height'] = 0
info_m['size'] = 0
return info_m
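# For reference, a successful get_video_info() call yields a dict shaped like
# the following (field values are illustrative, not real output):
#
#   {'codec': 'h264', 'profile': 'yuv420p', 'width': 800, 'height': 600,
#    'hours': 0, 'minutes': 5, 'seconds': 12.3, 'duration': '00:05:12',
#    'total': 312, 'size': '12.4 MB'}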
#create_thumbnail(tr_rec, 'Big', tr_rec.video_thumbnail_time, '700:500')
def create_thumbnail(row, attach_str, thumb_time, thumb_size):
filepath = settings.MEDIA_ROOT + 'videos/' + str(row.tutorial_detail.foss_id) + '/' + str(row.tutorial_detail_id) + '/'
filename = row.tutorial_detail.tutorial.replace(' ', '-') + '-' + attach_str + '.png'
try:
#process = subprocess.Popen(['/usr/bin/ffmpeg', '-i ' + filepath + row.video + ' -r ' + str(30) + ' -ss ' + str(thumb_time) + ' -s ' + thumb_size + ' -vframes ' + str(1) + ' -f ' + 'image2 ' + filepath + filename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
process = subprocess.Popen(['/usr/bin/ffmpeg', '-i', filepath + row.video, '-r', str(30), '-ss', str(thumb_time), '-s', thumb_size, '-vframes', str(1), '-f', 'image2', filepath + filename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
if stderr:
print filepath + filename
print stderr
except Exception, e:
        print 'create_thumbnail failed:', e
pass
def add_qualityreviewer_notification(tr_rec, comp_title, message):
dr_roles = QualityReviewerRole.objects.filter(foss_category = tr_rec.tutorial_detail.foss, language = tr_rec.language, status = 1)
for dr_role in dr_roles:
QualityReviewerNotification.objects.create(user = dr_role.user, title = comp_title, message = message, tutorial_resource = tr_rec)
def add_domainreviewer_notification(tr_rec, comp_title, message):
dr_roles = DomainReviewerRole.objects.filter(foss_category = tr_rec.tutorial_detail.foss, language = tr_rec.language, status = 1)
for dr_role in dr_roles:
DomainReviewerNotification.objects.create(user = dr_role.user, title = comp_title, message = message, tutorial_resource = tr_rec)
def add_adminreviewer_notification(tr_rec, comp_title, message):
role = Group.objects.get(name = 'Video-Reviewer')
users = role.user_set.all()
for user in users:
AdminReviewerNotification.objects.create(user = user, title = comp_title, message = message, tutorial_resource = tr_rec)
def add_contributor_notification(tr_rec, comp_title, message):
con_roles = ContributorRole.objects.filter(foss_category = tr_rec.tutorial_detail.foss, language = tr_rec.language, status = 1)
for con in con_roles:
ContributorNotification.objects.create(user = con.user, title = comp_title, message = message, tutorial_resource = tr_rec)
@login_required
def creation_add_role(request, role_type):
flag = 1
roles = {
'contributor': 0,
'external-contributor': 1,
'video-reviewer': 2,
'domain-reviewer': 3,
'quality-reviewer': 4,
}
if role_type in roles:
try:
RoleRequest.objects.create(user = request.user, role_type = roles[role_type], status = 0)
except:
try:
role_rec = RoleRequest.objects.get(user = request.user, role_type = roles[role_type], status = 2)
role_rec.status = 0
role_rec.save()
except:
flag = 0
messages.warning(request, 'Request to the ' + role_type.title() + ' role is already waiting for admin approval!')
else:
flag = 0
messages.error(request, 'Invalid role argument!')
if flag:
messages.success(request, 'Request to the ' + role_type.title() + ' role has been sent for admin approval!')
return HttpResponseRedirect('/creation/')
@login_required
def creation_accept_role_request(request, recid):
    if is_administrator(request.user):
roles = {
0: 'Contributor',
1: 'External-Contributor',
2: 'Video-Reviewer',
3: 'Domain-Reviewer',
4: 'Quality-Reviewer',
}
try:
role_rec = RoleRequest.objects.get(pk = recid, status = 0)
if role_rec.role_type in roles:
try:
role_rec.user.groups.add(Group.objects.get(name = roles[role_rec.role_type]))
role_rec.approved_user = request.user
role_rec.status = 1
role_rec.save()
messages.success(request, roles[role_rec.role_type] + ' role is added to ' + role_rec.user.username)
except:
messages.error(request, role_rec.user.username + ' is already having ' + roles[role_rec.role_type] + ' role.')
else:
messages.error(request, 'Invalid role argument!')
except:
messages.error(request, 'The given role request id is either invalid or it is already accepted')
else:
raise PermissionDenied()
return HttpResponseRedirect('/creation/role/requests/' + roles[role_rec.role_type].lower() + '/')
@login_required
def creation_reject_role_request(request, recid):
    if is_administrator(request.user):
print "test 2"
roles = {
0: 'Contributor',
1: 'External-Contributor',
2: 'Video-Reviewer',
3: 'Domain-Reviewer',
4: 'Quality-Reviewer',
}
try:
role_rec = RoleRequest.objects.get(pk = recid, status = 0)
role_rec.delete()
messages.success(request, 'Selected role request has been deleted successfully!')
except:
            messages.error(request, 'The given role request id is either invalid or it has already been rejected')
else:
raise PermissionDenied()
return HttpResponseRedirect('/creation/role/requests/' + roles[role_rec.role_type].lower() + '/')
@login_required
def creation_revoke_role_request(request, role_type):
roles = {
'contributor': 0,
'external-contributor': 1,
'video-reviewer': 2,
'domain-reviewer': 3,
'quality-reviewer': 4,
}
if role_type in roles:
try:
role_rec = RoleRequest.objects.get(user = request.user, role_type = roles[role_type], status = 1)
if role_rec.role_type != 2:
if role_rec.role_type == 0 or role_rec.role_type == 1:
ContributorRole.objects.filter(user = role_rec.user).update(status = 0)
elif role_rec.role_type == 3:
DomainReviewerRole.objects.filter(user = role_rec.user).update(status = 0)
elif role_rec.role_type == 4:
QualityReviewerRole.objects.filter(user = role_rec.user).update(status = 0)
role_rec.user.groups.remove(Group.objects.get(name = role_type.title()))
role_rec.status = 2
role_rec.save()
messages.success(request, role_type.title() + ' role has been revoked from ' + role_rec.user.username)
except:
raise PermissionDenied()
else:
messages.error(request, 'Invalid role type argument!')
return HttpResponseRedirect('/creation/')
@login_required
def creation_list_role_requests(request, tabid = 'contributor'):
    if is_administrator(request.user):
contrib_recs = RoleRequest.objects.filter(role_type = 0, status = 0).order_by('-updated')
ext_contrib_recs = RoleRequest.objects.filter(role_type = 1, status = 0).order_by('-updated')
admin_recs = RoleRequest.objects.filter(role_type = 2, status = 0).order_by('-updated')
domain_recs = RoleRequest.objects.filter(role_type = 3, status = 0).order_by('-updated')
quality_recs = RoleRequest.objects.filter(role_type = 4, status = 0).order_by('-updated')
context = {
'tabid': tabid,
'contrib_recs': contrib_recs,
'ext_contrib_recs': ext_contrib_recs,
'admin_recs': admin_recs,
'domain_recs': domain_recs,
'quality_recs': quality_recs,
}
return render(request, 'creation/templates/creation_list_role_requests.html', context)
else:
raise PermissionDenied()
@login_required
def init_creation_app(request):
try:
if Group.objects.filter(name = 'Contributor').count() == 0:
Group.objects.create(name = 'Contributor')
if Group.objects.filter(name = 'External-Contributor').count() == 0:
Group.objects.create(name = 'External-Contributor')
if Group.objects.filter(name = 'Video-Reviewer').count() == 0:
Group.objects.create(name = 'Video-Reviewer')
if Group.objects.filter(name = 'Domain-Reviewer').count() == 0:
Group.objects.create(name = 'Domain-Reviewer')
if Group.objects.filter(name = 'Quality-Reviewer').count() == 0:
Group.objects.create(name = 'Quality-Reviewer')
if Group.objects.filter(name = 'Administrator').count() == 0:
Group.objects.create(name = 'Administrator')
messages.success(request, 'Creation application initialised successfully!')
except Exception, e:
messages.error(request, str(e))
return HttpResponseRedirect('/creation/')
# Creation app dashboard
@login_required
def creationhome(request):
if is_contributor(request.user) or is_domainreviewer(request.user) or is_videoreviewer(request.user) or is_qualityreviewer(request.user):
contrib_notifs = []
admin_notifs = []
domain_notifs = []
quality_notifs = []
if is_contributor(request.user):
contrib_notifs = ContributorNotification.objects.filter(user = request.user).order_by('-created')
if is_videoreviewer(request.user):
admin_notifs = AdminReviewerNotification.objects.filter(user = request.user).order_by('-created')
if is_domainreviewer(request.user):
domain_notifs = DomainReviewerNotification.objects.filter(user = request.user).order_by('-created')
if is_qualityreviewer(request.user):
quality_notifs = QualityReviewerNotification.objects.filter(user = request.user).order_by('-created')
context = {
'contrib_notifs': contrib_notifs,
'admin_notifs': admin_notifs,
'domain_notifs': domain_notifs,
'quality_notifs': quality_notifs,
'is_creation_role': True
}
context.update(csrf(request))
return render(request, 'creation/templates/creationhome.html', context)
else:
context = {
'is_creation_role': False
}
return render(request, 'creation/templates/creationhome.html', context)
# tutorial upload index page
@login_required
def upload_index(request):
if not is_contributor(request.user):
raise PermissionDenied()
if request.method == 'POST':
form = UploadTutorialForm(request.user, request.POST)
if form.is_valid():
common_content = TutorialCommonContent()
if TutorialCommonContent.objects.filter(tutorial_detail_id = request.POST['tutorial_name']).count():
common_content = TutorialCommonContent.objects.get(tutorial_detail_id = request.POST['tutorial_name'])
else:
common_content.tutorial_detail = TutorialDetail.objects.get(pk = request.POST['tutorial_name'])
common_content.slide_user = request.user
common_content.code_user = request.user
common_content.assignment_user = request.user
common_content.prerequisite_user = request.user
common_content.keyword_user = request.user
common_content.save()
if TutorialResource.objects.filter(tutorial_detail_id = request.POST['tutorial_name'], common_content_id = common_content.id, language_id = request.POST['language']).count():
tutorial_resource = TutorialResource.objects.get(tutorial_detail_id = request.POST['tutorial_name'], common_content_id = common_content.id, language_id = request.POST['language'])
else:
tutorial_resource = TutorialResource()
tutorial_resource.tutorial_detail = common_content.tutorial_detail
tutorial_resource.common_content = common_content
tutorial_resource.language_id = request.POST['language']
tutorial_resource.outline_user = request.user
tutorial_resource.script_user = request.user
tutorial_resource.video_user = request.user
tutorial_resource.save()
return HttpResponseRedirect('/creation/upload/tutorial/' + str(tutorial_resource.id) + '/')
else:
form = UploadTutorialForm(user=request.user)
context = {
'form': form,
}
context.update(csrf(request))
return render(request, 'creation/templates/upload_index.html', context)
def upload_publish_outline(request):
if not is_contributor(request.user):
raise PermissionDenied()
if request.method == 'POST':
form = UploadPublishTutorialForm(request.user, request.POST)
if form.is_valid():
tutorial_resource = TutorialResource.objects.get(tutorial_detail_id = request.POST['tutorial_name'], language_id = request.POST['language'])
return HttpResponseRedirect('/creation/upload/outline/' + str(tutorial_resource.id) + '/?publish=1')
else:
form = UploadPublishTutorialForm(user=request.user)
context = {
'form': form,
}
context.update(csrf(request))
return render(request, 'creation/templates/upload-publish-script.html', context)
@csrf_exempt
def ajax_upload_prerequisite(request):
data = ''
if request.method == 'POST':
foss = ''
try:
foss = int(request.POST.get('foss'))
lang_rec = Language.objects.get(name = 'English')
except:
foss = ''
if foss and lang_rec:
td_list = TutorialDetail.objects.filter(foss_id = foss).values_list('id')
td_recs = TutorialDetail.objects.filter(
id__in = TutorialResource.objects.filter(
tutorial_detail_id__in = td_list,
language_id = lang_rec.id,
).values_list(
'tutorial_detail_id'
)
).order_by('tutorial')
for td_rec in td_recs:
data += '<option value="' + str(td_rec.id) + '">' + td_rec.tutorial + '</option>'
if data:
data = '<option value="">Select Tutorial</option>' + data
return HttpResponse(json.dumps(data), content_type='application/json')
@csrf_exempt
def ajax_upload_foss(request):
data = ''
if request.method == 'POST':
foss = ''
lang = ''
publish = request.POST.get('publish', False)
try:
foss = request.POST.get('foss')
lang = request.POST.get('lang')
except:
foss = ''
lang = ''
if foss and lang and publish:
lang_rec = Language.objects.get(pk = int(lang))
if lang_rec.name == 'English':
td_list = TutorialDetail.objects.filter(foss_id = foss).values_list('id')
tutorials = TutorialDetail.objects.filter(
id__in = td_list
)
else:
eng_rec = Language.objects.get(name = 'English')
td_list = TutorialDetail.objects.filter(foss_id = foss).values_list('id')
tutorials = TutorialDetail.objects.filter(
id__in = TutorialResource.objects.filter(
tutorial_detail_id__in = td_list,
language_id = eng_rec.id,
status = 1
).values_list(
'tutorial_detail_id'
)
)
for tutorial in tutorials:
data += '<option value="' + str(tutorial.id) + '">' + tutorial.tutorial + '</option>'
if data:
data = '<option value="">Select Tutorial</option>' + data
elif foss and lang:
lang_rec = Language.objects.get(pk = int(lang))
if lang_rec.name == 'English':
td_list = TutorialDetail.objects.filter(foss_id = foss).values_list('id')
tutorials = TutorialDetail.objects.filter(
id__in = td_list
).exclude(
id__in = TutorialResource.objects.filter(
tutorial_detail_id__in = td_list,
language_id = lang_rec.id,
status = 1
).values_list(
'tutorial_detail_id'
)
)
else:
eng_rec = Language.objects.get(name = 'English')
td_list = TutorialDetail.objects.filter(foss_id = foss).values_list('id')
tutorials = TutorialDetail.objects.filter(
id__in = TutorialResource.objects.filter(
tutorial_detail_id__in = td_list,
language_id = eng_rec.id,
status = 1
).values_list(
'tutorial_detail_id'
)
).exclude(
id__in = TutorialResource.objects.filter(
tutorial_detail_id__in = td_list,
language_id = lang_rec.id,
status__gte = 1
).values_list(
'tutorial_detail_id'
)
)
for tutorial in tutorials:
data += '<option value="' + str(tutorial.id) + '">' + tutorial.tutorial + '</option>'
if data:
data = '<option value="">Select Tutorial</option>' + data
elif foss:
languages = Language.objects.filter(id__in = ContributorRole.objects.filter(user_id = request.user.id, foss_category_id = foss).values_list('language_id'))
for language in languages:
data += '<option value="' + str(language.id) + '">' + language.name + '</option>'
if data:
data = '<option value="">Select Language</option>' + data
return HttpResponse(json.dumps(data), content_type='application/json')
@csrf_exempt
def ajax_get_keywords(request):
data = ''
if request.method == 'POST':
try:
tutorial_detail_id = int(request.POST.get('tutorial_detail'))
tcc = TutorialCommonContent.objects.get(tutorial_detail_id = tutorial_detail_id)
data = tcc.keyword
except Exception, e:
pass
return HttpResponse(json.dumps(data), content_type='application/json')
@login_required
def upload_tutorial(request, trid):
tr_rec = None
contrib_log = None
review_log = None
try:
tr_rec = TutorialResource.objects.get(pk = trid, status = 0)
ContributorRole.objects.get(user_id = request.user.id, foss_category_id = tr_rec.tutorial_detail.foss_id, language_id = tr_rec.language_id, status = 1)
contrib_log = ContributorLog.objects.filter(tutorial_resource_id = tr_rec.id).order_by('-created')
review_log = NeedImprovementLog.objects.filter(tutorial_resource_id = tr_rec.id).order_by('-created')
except Exception, e:
print e
raise PermissionDenied()
context = {
'tr': tr_rec,
'contrib_log': contrib_log,
'review_log': review_log,
'script_base': settings.SCRIPT_URL,
}
context.update(csrf(request))
return render(request, 'creation/templates/upload_tutorial.html', context)
@login_required
def upload_outline(request, trid):
tr_rec = None
publish = int(request.GET.get('publish', 0))
try:
status = 0
if publish:
status = 1
tr_rec = TutorialResource.objects.get(pk = trid, status = status)
ContributorRole.objects.get(user_id = request.user.id, foss_category_id = tr_rec.tutorial_detail.foss_id, language_id = tr_rec.language_id, status = 1)
except Exception, e:
raise PermissionDenied()
if not publish and tr_rec.outline_status > 2 and tr_rec.outline_status != 5:
raise PermissionDenied()
response_msg = ''
error_msg = ''
warning_msg = ''
if request.method == 'POST':
form = UploadOutlineForm(trid, request.POST)
if form.is_valid():
try:
prev_state = tr_rec.outline_status
if tr_rec.outline != request.POST['outline']:
tr_rec.outline = request.POST['outline']
else:
warning_msg = 'There is no change in outline'
if publish:
tr_rec.save()
messages.success(request, "Outline status updated successfully!")
return HttpResponseRedirect('/creation/upload-publish-outline/')
tr_rec.outline_user = request.user
tr_rec.outline_status = 2
tr_rec.save()
ContributorLog.objects.create(status = prev_state, user = request.user, tutorial_resource = tr_rec, component = 'outline')
comp_title = tr_rec.tutorial_detail.foss.foss + ': ' + tr_rec.tutorial_detail.tutorial + ' - ' + tr_rec.language.name
add_domainreviewer_notification(tr_rec, comp_title, 'Outline waiting for Domain review')
response_msg = 'Outline status updated successfully!'
except Exception, e:
print e
error_msg = 'Something went wrong, please try again later.'
else:
context = {
'form': form,
}
context.update(csrf(request))
return render(request, 'creation/templates/upload_outline.html', context)
form = UploadOutlineForm(trid)
if response_msg:
messages.success(request, response_msg)
if error_msg:
messages.error(request, error_msg)
if warning_msg:
messages.warning(request, warning_msg)
context = {
'form': form,
}
context.update(csrf(request))
return render(request, 'creation/templates/upload_outline.html', context)
@login_required
def upload_script(request, trid):
tr_rec = None
try:
tr_rec = TutorialResource.objects.get(pk = trid, status = 0)
ContributorRole.objects.get(user_id = request.user.id, foss_category_id = tr_rec.tutorial_detail.foss_id, language_id = tr_rec.language_id, status = 1)
except Exception, e:
raise PermissionDenied()
if tr_rec.script_status > 2 and tr_rec.script_status != 5:
raise PermissionDenied()
response_msg = ''
error_msg = ''
storage_path = tr_rec.tutorial_detail.foss.foss.replace(' ', '-') + '/' + tr_rec.tutorial_detail.level.code + '/' + tr_rec.tutorial_detail.tutorial.replace(' ', '-') + '/' + tr_rec.language.name
script_path = settings.SCRIPT_URL + storage_path
if request.method == 'POST':
form = UploadScriptForm(script_path, request.POST)
if form.is_valid():
try:
code = 0
try:
code = urlopen(script_path).code
except Exception, e:
code = e.code
if(int(code) == 200):
prev_state = tr_rec.script_status
tr_rec.script = storage_path
tr_rec.script_user = request.user
tr_rec.script_status = 2
tr_rec.save()
ContributorLog.objects.create(status = prev_state, user = request.user, tutorial_resource = tr_rec, component = 'script')
comp_title = tr_rec.tutorial_detail.foss.foss + ': ' + tr_rec.tutorial_detail.tutorial + ' - ' + tr_rec.language.name
add_domainreviewer_notification(tr_rec, comp_title, 'Script waiting for domain review')
response_msg = 'Script status updated successfully'
else:
error_msg = 'Please update the script to wiki before pressing the submit button.'
except Exception, e:
print e
error_msg = 'Something went wrong, please try again later.'
else:
context = {
'form': form,
'script_path': script_path,
}
context.update(csrf(request))
return render(request, 'creation/templates/upload_script.html', context)
form = UploadScriptForm(script_path)
if error_msg:
messages.error(request, error_msg)
if response_msg:
messages.success(request, response_msg)
context = {
'form': form,
'script_path': script_path,
}
context.update(csrf(request))
return render(request, 'creation/templates/upload_script.html', context)
@login_required
def upload_timed_script(request):
if not is_contributor(request.user):
raise PermissionDenied()
form = UploadTimedScriptForm(request.user)
if request.method == 'POST':
form = UploadTimedScriptForm(request.user, request.POST)
lang = None
if form.is_valid():
try:
return HttpResponseRedirect('/creation/upload/timed-script/' + request.POST.get('tutorial_name') + '/save/')
except Exception, e:
messages.error(request, str(e))
context = {
'form': form
}
context.update(csrf(request))
return render(request, 'creation/templates/upload_timed_script.html', context)
@login_required
def save_timed_script(request, tdid):
if not is_contributor(request.user):
raise PermissionDenied()
try:
tr_rec = TutorialResource.objects.get(tutorial_detail_id = tdid, language__name = 'English')
ContributorRole.objects.get(user_id = request.user.id, foss_category_id = tr_rec.tutorial_detail.foss_id, language_id = tr_rec.language_id, status = 1)
except Exception, e:
print e
raise PermissionDenied()
response_msg = ''
error_msg = ''
storage_path = tr_rec.tutorial_detail.foss.foss.replace(' ', '-') + '/' + tr_rec.tutorial_detail.level.code + '/' + tr_rec.tutorial_detail.tutorial.replace(' ', '-') + '/' + tr_rec.language.name + '-timed'
script_path = settings.SCRIPT_URL + storage_path
form = UploadScriptForm(script_path)
if request.method == 'POST':
form = UploadScriptForm(script_path, request.POST)
if form.is_valid():
try:
code = 0
try:
code = urlopen(script_path).code
except Exception, e:
code = e.code
if(int(code) == 200):
tr_rec.timed_script = storage_path
tr_rec.save()
srt_file_path = settings.MEDIA_ROOT + 'videos/' + str(tr_rec.tutorial_detail.foss_id) + '/' + str(tr_rec.tutorial_detail_id) + '/' + tr_rec.tutorial_detail.tutorial.replace(' ', '-') + '-English.srt'
minified_script_url = settings.SCRIPT_URL.strip('/') + '?title=' + quote(storage_path) + '&printable=yes'
if generate_subtitle(minified_script_url, srt_file_path):
messages.success(request, 'Timed script updated and subtitle file generated successfully!')
else:
                        messages.success(request, 'Timed script updated successfully! But there was an error while generating the subtitle file.')
return HttpResponseRedirect('/creation/upload/timed-script/')
else:
messages.error(request, 'Please update the timed-script to wiki before pressing the submit button.')
except Exception, e:
messages.error(request, str(e))
context = {
'form': form,
'page_heading': 'timed',
'script_path': script_path,
}
context.update(csrf(request))
return render(request, 'creation/templates/save_timed_script.html', context)
@csrf_exempt
def ajax_upload_timed_script(request):
data = ''
foss = request.POST.get('foss', '')
if foss:
rows = TutorialDetail.objects.filter(id__in = TutorialResource.objects.filter(tutorial_detail__foss_id = foss, language__name = 'English', script_status = 4).values_list('tutorial_detail_id')).order_by('order')
data = '<option value="">Select Tutorial Name</option>'
for row in rows:
data += '<option value="' + str(row.id) + '">' + row.tutorial + '</option>'
return HttpResponse(json.dumps(data), content_type='application/json')
@login_required
def upload_prerequisite(request, trid):
tr_rec = None
try:
tr_rec = TutorialResource.objects.get(pk = trid, status = 0)
ContributorRole.objects.get(user_id = request.user.id, foss_category_id = tr_rec.tutorial_detail.foss_id, language_id = tr_rec.language_id, status = 1)
except Exception, e:
raise PermissionDenied()
if tr_rec.common_content.prerequisite_status > 2 and tr_rec.common_content.prerequisite_status != 5:
raise PermissionDenied()
response_msg = ''
error_msg = ''
warning_msg = ''
if request.method == 'POST':
form = UploadPrerequisiteForm(request.user, request.POST)
if form.is_valid():
try:
prev_state = tr_rec.common_content.prerequisite_status
if tr_rec.common_content.prerequisite_id != request.POST['tutorial_name']:
tr_rec.common_content.prerequisite_id = request.POST['tutorial_name']
else:
warning_msg = 'There is no change in Prerequisite'
tr_rec.common_content.prerequisite_user = request.user
tr_rec.common_content.prerequisite_status = 2
tr_rec.common_content.save()
ContributorLog.objects.create(status = prev_state, user = request.user, tutorial_resource = tr_rec, component = 'prerequisite')
comp_title = tr_rec.tutorial_detail.foss.foss + ': ' + tr_rec.tutorial_detail.tutorial + ' - ' + tr_rec.language.name
add_domainreviewer_notification(tr_rec, comp_title, 'Prerequisite waiting for Domain review')
response_msg = 'Prerequisite status updated successfully!'
except Exception, e:
error_msg = 'Something went wrong, please try again later.'
else:
context = {
'form': form,
}
context.update(csrf(request))
return render(request, 'creation/templates/upload_prerequisite.html', context)
form = UploadPrerequisiteForm(request.user)
if response_msg:
messages.success(request, response_msg)
if error_msg:
messages.error(request, error_msg)
if warning_msg:
messages.warning(request, warning_msg)
context = {
'form': form,
}
context.update(csrf(request))
return render(request, 'creation/templates/upload_prerequisite.html', context)
@login_required
def upload_keywords(request, trid):
tr_rec = None
try:
tr_rec = TutorialResource.objects.get(pk = trid, status = 0)
ContributorRole.objects.get(user_id = request.user.id, foss_category_id = tr_rec.tutorial_detail.foss_id, language_id = tr_rec.language_id, status = 1)
except Exception, e:
raise PermissionDenied()
if tr_rec.common_content.keyword_status > 2 and tr_rec.common_content.keyword_status != 5:
raise PermissionDenied()
response_msg = ''
error_msg = ''
warning_msg = ''
if request.method == 'POST':
form = UploadKeywordsForm(trid, request.POST)
if form.is_valid():
try:
prev_state = tr_rec.common_content.keyword_status
if tr_rec.common_content.keyword != request.POST['keywords']:
tr_rec.common_content.keyword = request.POST['keywords'].lower()
else:
warning_msg = 'There is no change in keywords'
tr_rec.common_content.keyword_user = request.user
tr_rec.common_content.keyword_status = 2
tr_rec.common_content.save()
ContributorLog.objects.create(status = prev_state, user = request.user, tutorial_resource = tr_rec, component = 'keyword')
comp_title = tr_rec.tutorial_detail.foss.foss + ': ' + tr_rec.tutorial_detail.tutorial + ' - ' + tr_rec.language.name
add_domainreviewer_notification(tr_rec, comp_title, 'Keywords waiting for Domain review')
response_msg = 'Keywords status updated successfully!'
except Exception, e:
error_msg = 'Something went wrong, please try again later.'
else:
context = {
'form': form,
}
context.update(csrf(request))
return render(request, 'creation/templates/upload_keywords.html', context)
form = UploadKeywordsForm(trid)
if response_msg:
messages.success(request, response_msg)
if error_msg:
messages.error(request, error_msg)
if warning_msg:
messages.warning(request, warning_msg)
context = {
'form': form,
}
context.update(csrf(request))
return render(request, 'creation/templates/upload_keywords.html', context)
@login_required
def upload_component(request, trid, component):
tr_rec = None
try:
tr_rec = TutorialResource.objects.get(pk = trid, status = 0)
ContributorRole.objects.get(user_id = request.user.id, foss_category_id = tr_rec.tutorial_detail.foss_id, language_id = tr_rec.language_id, status = 1)
comp_title = tr_rec.tutorial_detail.foss.foss + ': ' + tr_rec.tutorial_detail.tutorial + ' - ' + tr_rec.language.name
except Exception, e:
raise PermissionDenied()
if component == 'video' and getattr(tr_rec, component + '_status') == 4:
raise PermissionDenied()
elif (component == 'slide' or component == 'code' or component == 'assignment') and getattr(tr_rec.common_content, component + '_status') == 4:
raise PermissionDenied()
else:
if request.method == 'POST':
response_msg = ''
error_msg = ''
form = ComponentForm(component, request.POST, request.FILES)
if form.is_valid():
try:
comp_log = ContributorLog()
comp_log.user = request.user
comp_log.tutorial_resource = tr_rec
comp_log.component = component
if component == 'video':
file_name, file_extension = os.path.splitext(request.FILES['comp'].name)
file_name = tr_rec.tutorial_detail.tutorial.replace(' ', '-') + '-' + tr_rec.language.name + file_extension
file_path = settings.MEDIA_ROOT + 'videos/' + str(tr_rec.tutorial_detail.foss_id) + '/' + str(tr_rec.tutorial_detail.id) + '/'
full_path = file_path + file_name
if os.path.isfile(file_path + tr_rec.video) and tr_rec.video_status > 0:
if 'isarchive' in request.POST and int(request.POST.get('isarchive', 0)) > 0:
archived_file = 'Archived-' + str(request.user.id) + '-' + str(int(time.time())) + '-' + tr_rec.video
os.rename(file_path + tr_rec.video, file_path + archived_file)
ArchivedVideo.objects.create(tutorial_resource = tr_rec, user = request.user, version = tr_rec.version, video = archived_file, atype = tr_rec.video_status)
if int(request.POST.get('isarchive', 0)) == 2:
tr_rec.version += 1
fout = open(full_path, 'wb+')
f = request.FILES['comp']
# Iterate through the chunks.
for chunk in f.chunks():
fout.write(chunk)
fout.close()
comp_log.status = tr_rec.video_status
tr_rec.video = file_name
tr_rec.video_user = request.user
tr_rec.video_status = 1
if not tr_rec.version:
tr_rec.version = 1
tr_rec.video_thumbnail_time = '00:' + request.POST.get('thumb_mins', '00') + ':' + request.POST.get('thumb_secs', '00')
tr_rec.save()
if tr_rec.language.name == 'English':
create_thumbnail(tr_rec, 'Big', tr_rec.video_thumbnail_time, '700:500')
create_thumbnail(tr_rec, 'Small', tr_rec.video_thumbnail_time, '170:127')
comp_log.save()
comp_title = tr_rec.tutorial_detail.foss.foss + ': ' + tr_rec.tutorial_detail.tutorial + ' - ' + tr_rec.language.name
add_adminreviewer_notification(tr_rec, comp_title, 'Video waiting for admin review')
response_msg = 'Video uploaded successfully!'
elif component == 'slide':
file_name, file_extension = os.path.splitext(request.FILES['comp'].name)
file_name = tr_rec.tutorial_detail.tutorial.replace(' ', '-') + '-Slides' + file_extension
file_path = settings.MEDIA_ROOT + 'videos/' + str(tr_rec.tutorial_detail.foss_id) + '/' + str(tr_rec.tutorial_detail.id) + '/resources/' + file_name
fout = open(file_path, 'wb+')
f = request.FILES['comp']
# Iterate through the chunks.
for chunk in f.chunks():
fout.write(chunk)
fout.close()
comp_log.status = tr_rec.common_content.slide_status
tr_rec.common_content.slide = file_name
tr_rec.common_content.slide_status = 2
tr_rec.common_content.slide_user = request.user
tr_rec.common_content.save()
comp_log.save()
add_domainreviewer_notification(tr_rec, comp_title, component.title() + ' waiting for domain review')
response_msg = 'Slides uploaded successfully!'
elif component == 'code':
file_name, file_extension = os.path.splitext(request.FILES['comp'].name)
file_name = tr_rec.tutorial_detail.tutorial.replace(' ', '-') + '-Codefiles' + file_extension
file_path = settings.MEDIA_ROOT + 'videos/' + str(tr_rec.tutorial_detail.foss_id) + '/' + str(tr_rec.tutorial_detail.id) + '/resources/' + file_name
fout = open(file_path, 'wb+')
f = request.FILES['comp']
# Iterate through the chunks.
for chunk in f.chunks():
fout.write(chunk)
fout.close()
comp_log.status = tr_rec.common_content.code_status
tr_rec.common_content.code = file_name
tr_rec.common_content.code_status = 2
tr_rec.common_content.code_user = request.user
tr_rec.common_content.save()
comp_log.save()
add_domainreviewer_notification(tr_rec, comp_title, component.title() + ' waiting for domain review')
response_msg = 'Code files uploaded successfully!'
elif component == 'assignment':
file_name, file_extension = os.path.splitext(request.FILES['comp'].name)
file_name = tr_rec.tutorial_detail.tutorial.replace(' ', '-') + '-Assignment' + file_extension
file_path = settings.MEDIA_ROOT + 'videos/' + str(tr_rec.tutorial_detail.foss_id) + '/' + str(tr_rec.tutorial_detail.id) + '/resources/' + file_name
fout = open(file_path, 'wb+')
f = request.FILES['comp']
# Iterate through the chunks.
for chunk in f.chunks():
fout.write(chunk)
fout.close()
comp_log.status = tr_rec.common_content.assignment_status
tr_rec.common_content.assignment = file_name
tr_rec.common_content.assignment_status = 2
tr_rec.common_content.assignment_user = request.user
tr_rec.common_content.save()
comp_log.save()
add_domainreviewer_notification(tr_rec, comp_title, component.title() + ' waiting for domain review')
response_msg = 'Assignment file uploaded successfully!'
except Exception, e:
error_msg = 'Something went wrong, please try again later.'
form = ComponentForm(component)
if response_msg:
messages.success(request, response_msg)
if error_msg:
messages.error(request, error_msg)
context = {
'form': form,
'tr': tr_rec,
'title': component,
}
context.update(csrf(request))
return render(request, 'creation/templates/upload_component.html', context)
else:
context = {
'form': form,
'tr': tr_rec,
'title': component,
}
context.update(csrf(request))
return render(request, 'creation/templates/upload_component.html', context)
form = ComponentForm(component)
context = {
'form': form,
'tr': tr_rec,
'title': component,
}
context.update(csrf(request))
return render(request, 'creation/templates/upload_component.html', context)
@login_required
def mark_notrequired(request, trid, tcid, component):
tcc = None
try:
tr_rec = TutorialResource.objects.get(pk = trid, status = 0)
ContributorRole.objects.get(user_id = request.user.id, foss_category_id = tr_rec.tutorial_detail.foss_id, language_id = tr_rec.language_id, status = 1)
except Exception, e:
raise PermissionDenied()
try:
tcc = TutorialCommonContent.objects.get(pk = tcid)
if getattr(tcc, component + '_status') == 0:
prev_state = getattr(tcc, component + '_status')
setattr(tcc, component + '_status', 6)
setattr(tcc, component + '_user_id', request.user.id)
tcc.save()
ContributorLog.objects.create(user = request.user, tutorial_resource_id = trid, component = component, status = prev_state)
messages.success(request, component.title() + " status updated successfully!")
else:
messages.error(request, "Invalid resource id!")
except Exception, e:
messages.error(request, 'Something went wrong, please try after some time.')
return HttpResponseRedirect(request.META['HTTP_REFERER'])
def view_component(request, trid, component):
tr_rec = None
context = {}
try:
tr_rec = TutorialResource.objects.get(pk = trid)
except Exception, e:
print e
raise PermissionDenied()
if component == 'outline':
context = {
'component': component,
'component_data': tr_rec.outline
}
elif component == 'keyword':
context = {
'component': component,
'component_data': tr_rec.common_content.keyword
}
elif component == 'video':
video_path = settings.MEDIA_ROOT + "videos/" + str(tr_rec.tutorial_detail.foss_id) + "/" + str(tr_rec.tutorial_detail_id) + "/" + tr_rec.video
video_info = get_video_info(video_path)
context = {
'tr': tr_rec,
'component': component,
'video_info': video_info,
'media_url': settings.MEDIA_URL
}
else:
messages.error(request, 'Invalid component passed as argument!')
return HttpResponseRedirect(request.META['HTTP_REFERER'])
return render(request, 'creation/templates/view_component.html', context)
@login_required
def tutorials_contributed(request):
tmp_ids = []
if is_contributor(request.user):
foss_contrib_list = ContributorRole.objects.filter(user = request.user, status = 1)
for foss_contrib in foss_contrib_list:
tr_recs = TutorialResource.objects.filter(tutorial_detail__foss_id = foss_contrib.foss_category_id, language_id = foss_contrib.language_id)
for tr_rec in tr_recs:
flag = 1
if tr_rec.language.name == 'English':
if (tr_rec.common_content.slide_user_id != request.user.id or tr_rec.common_content.slide_status == 0) and (tr_rec.common_content.code_user_id != request.user.id or tr_rec.common_content.code_status == 0) and (tr_rec.common_content.assignment_user_id != request.user.id or tr_rec.common_content.assignment_status == 0) and (tr_rec.common_content.keyword_user_id != request.user.id or tr_rec.common_content.keyword_status == 0):
flag = 0
else:
flag = 0
if flag == 1 or (tr_rec.outline_user_id == request.user.id and tr_rec.outline_status > 0) or (tr_rec.script_user_id == request.user.id and tr_rec.script_status > 0) or (tr_rec.video_user_id == request.user.id and tr_rec.video_status > 0):
tmp_ids.append(tr_rec.id)
tmp_recs = None
ordering = ''
header = ''
try:
tmp_recs = TutorialResource.objects.filter(id__in = tmp_ids).distinct()
raw_get_data = request.GET.get('o', None)
header = {
1: SortableHeader('S.No', False),
2: SortableHeader('tutorial_detail__foss__foss', True, 'Foss'),
3: SortableHeader('tutorial_detail__tutorial', True, 'Tutorial Name'),
4: SortableHeader('language__name', True, 'Language'),
5: SortableHeader('Outline', False, '', 'col-center'),
6: SortableHeader('Script', False, '', 'col-center'),
7: SortableHeader('Slide', False, '', 'col-center'),
8: SortableHeader('Video', False, '', 'col-center'),
9: SortableHeader('Codefiles', False, '', 'col-center'),
10: SortableHeader('Assignment', False, '', 'col-center'),
11: SortableHeader('Prerequisite', False, '', 'col-center'),
12: SortableHeader('Keywords', False, '', 'col-center'),
13: SortableHeader('Status', False)
}
tmp_recs = get_sorted_list(request, tmp_recs, header, raw_get_data)
ordering = get_field_index(raw_get_data)
page = request.GET.get('page')
tmp_recs = get_page(tmp_recs, page)
except:
pass
context = {
'collection': tmp_recs,
'header': header,
'ordering': ordering,
'media_url': settings.MEDIA_URL
}
return render(request, 'creation/templates/my_contribs.html', context)
else:
raise PermissionDenied()
@login_required
def tutorials_pending(request):
tmp_ids = []
if is_contributor(request.user) or is_domainreviewer(request.user) or \
        is_qualityreviewer(request.user) or is_administrator(request.user):
try:
tmp_recs = TutorialResource.objects.filter(status=0)
raw_get_data = request.GET.get('o', None)
header = {
1: SortableHeader('S.No', False),
2: SortableHeader('tutorial_detail__foss__foss', True, 'Foss'),
3: SortableHeader('tutorial_detail__tutorial', True, 'Tutorial Name'),
4: SortableHeader('language__name', True, 'Language'),
| 5: SortableHeader('Outline', False, '', 'col-center'), | 4,072 | lcc_e | python | null | 6db294b63c7ab23de37dff9e04724eb68d907ebb6dae1161 |
|
from __future__ import unicode_literals
import copy
import inspect
import warnings
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.core import checks
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldDoesNotExist, FieldError, MultipleObjectsReturned,
ObjectDoesNotExist, ValidationError,
)
from django.db import (
DEFAULT_DB_ALIAS, DJANGO_VERSION_PICKLE_KEY, DatabaseError, connections,
router, transaction,
)
from django.db.models import signals
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import CASCADE, Collector
from django.db.models.fields import AutoField
from django.db.models.fields.related import (
ForeignObjectRel, ManyToOneRel, OneToOneField, lazy_related_operation,
resolve_relation,
)
from django.db.models.manager import ensure_default_manager
from django.db.models.options import Options
from django.db.models.query import Q
from django.db.models.query_utils import (
DeferredAttribute, deferred_class_factory,
)
from django.db.models.utils import make_model_tuple
from django.utils import six
from django.utils.encoding import force_str, force_text
from django.utils.functional import curry
from django.utils.six.moves import zip
from django.utils.text import capfirst, get_text_list
from django.utils.translation import ugettext_lazy as _
from django.utils.version import get_version
def subclass_exception(name, parents, module, attached_to=None):
"""
Create exception subclass. Used by ModelBase below.
If 'attached_to' is supplied, the exception will be created in a way that
allows it to be pickled, assuming the returned exception class will be added
as an attribute to the 'attached_to' class.
"""
class_dict = {'__module__': module}
if attached_to is not None:
def __reduce__(self):
# Exceptions are special - they've got state that isn't
# in self.__dict__. We assume it is all in self.args.
return (unpickle_inner_exception, (attached_to, name), self.args)
def __setstate__(self, args):
self.args = args
class_dict['__reduce__'] = __reduce__
class_dict['__setstate__'] = __setstate__
return type(name, parents, class_dict)
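# Rough illustration of what subclass_exception produces (hypothetical model
# name, shown only to make the pickling note above concrete): for a concrete
# model class ``Book``, ModelBase effectively does
#
#   Book.DoesNotExist = subclass_exception(
#       str('DoesNotExist'), (ObjectDoesNotExist,), Book.__module__,
#       attached_to=Book)
#
# so pickling an instance of ``Book.DoesNotExist`` round-trips by looking the
# exception class up again as an attribute of ``Book``.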
class ModelBase(type):
"""
Metaclass for all models.
"""
def __new__(cls, name, bases, attrs):
super_new = super(ModelBase, cls).__new__
# Also ensure initialization is only performed for subclasses of Model
# (excluding Model class itself).
parents = [b for b in bases if isinstance(b, ModelBase)]
if not parents:
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_class = super_new(cls, name, bases, {'__module__': module})
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
app_label = None
# Look for an application configuration to attach the model to.
app_config = apps.get_containing_app_config(module)
if getattr(meta, 'app_label', None) is None:
if app_config is None:
if not abstract:
raise RuntimeError(
"Model class %s.%s doesn't declare an explicit "
"app_label and either isn't in an application in "
"INSTALLED_APPS or else was imported before its "
"application was loaded. " % (module, name))
else:
app_label = app_config.label
new_class.add_to_class('_meta', Options(meta, app_label))
if not abstract:
new_class.add_to_class(
'DoesNotExist',
subclass_exception(
str('DoesNotExist'),
tuple(
x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract
) or (ObjectDoesNotExist,),
module,
attached_to=new_class))
new_class.add_to_class(
'MultipleObjectsReturned',
subclass_exception(
str('MultipleObjectsReturned'),
tuple(
x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract
) or (MultipleObjectsReturned,),
module,
attached_to=new_class))
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attributes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, 'ordering'):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, 'get_latest_by'):
new_class._meta.get_latest_by = base_meta.get_latest_by
is_proxy = new_class._meta.proxy
# If the model is a proxy, ensure that the base class
# hasn't been swapped out.
if is_proxy and base_meta and base_meta.swapped:
raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped))
if getattr(new_class, '_default_manager', None):
if not is_proxy:
# Multi-table inheritance doesn't inherit default manager from
# parents.
new_class._default_manager = None
new_class._base_manager = None
else:
# Proxy classes do inherit parent's default manager, if none is
# set explicitly.
new_class._default_manager = new_class._default_manager._copy_to_model(new_class)
new_class._base_manager = new_class._base_manager._copy_to_model(new_class)
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# All the fields of any type declared on this model
new_fields = chain(
new_class._meta.local_fields,
new_class._meta.local_many_to_many,
new_class._meta.virtual_fields
)
field_names = {f.name for f in new_fields}
# Basic setup for proxy models.
if is_proxy:
base = None
for parent in [kls for kls in parents if hasattr(kls, '_meta')]:
if parent._meta.abstract:
if parent._meta.fields:
raise TypeError(
"Abstract base class containing model fields not "
"permitted for proxy model '%s'." % name
)
else:
continue
if base is not None:
raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
else:
base = parent
if base is None:
raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
new_class._meta.setup_proxy(base)
new_class._meta.concrete_model = base._meta.concrete_model
base._meta.concrete_model._meta.proxied_children.append(new_class._meta)
else:
new_class._meta.concrete_model = new_class
# Collect the parent links for multi-table inheritance.
parent_links = {}
for base in reversed([new_class] + parents):
# Conceptually equivalent to `if base is Model`.
if not hasattr(base, '_meta'):
continue
# Skip concrete parent classes.
if base != new_class and not base._meta.abstract:
continue
# Locate OneToOneField instances.
for field in base._meta.local_fields:
if isinstance(field, OneToOneField):
related = resolve_relation(new_class, field.remote_field.model)
parent_links[make_model_tuple(related)] = field
# Do the appropriate setup for any model parents.
for base in parents:
original_base = base
if not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
continue
parent_fields = base._meta.local_fields + base._meta.local_many_to_many
# Check for clashes between locally declared fields and those
# on the base classes (we cannot handle shadowed fields at the
# moment).
for field in parent_fields:
if field.name in field_names:
raise FieldError(
'Local field %r in class %r clashes '
'with field of similar name from '
'base class %r' % (field.name, name, base.__name__)
)
if not base._meta.abstract:
# Concrete classes...
base = base._meta.concrete_model
base_key = make_model_tuple(base)
if base_key in parent_links:
field = parent_links[base_key]
elif not is_proxy:
attr_name = '%s_ptr' % base._meta.model_name
field = OneToOneField(
base,
on_delete=CASCADE,
name=attr_name,
auto_created=True,
parent_link=True,
)
# Only add the ptr field if it's not already present;
# e.g. migrations will already have it specified
if not hasattr(new_class, attr_name):
new_class.add_to_class(attr_name, field)
else:
field = None
new_class._meta.parents[base] = field
else:
# .. and abstract ones.
for field in parent_fields:
new_field = copy.deepcopy(field)
new_class.add_to_class(field.name, new_field)
# Pass any non-abstract parent classes onto child.
new_class._meta.parents.update(base._meta.parents)
# Inherit managers from the abstract base classes.
new_class.copy_managers(base._meta.abstract_managers)
# Proxy models inherit the non-abstract managers from their base,
# unless they have redefined any of them.
if is_proxy:
new_class.copy_managers(original_base._meta.concrete_managers)
# Inherit virtual fields (like GenericForeignKey) from the parent
# class
for field in base._meta.virtual_fields:
if base._meta.abstract and field.name in field_names:
raise FieldError(
'Local field %r in class %r clashes '
'with field of similar name from '
'abstract base class %r' % (field.name, name, base.__name__)
)
new_class.add_to_class(field.name, copy.deepcopy(field))
if abstract:
# Abstract base models can't be instantiated and don't appear in
# the list of models for an app. We do the final setup for them a
# little differently from normal models.
attr_meta.abstract = False
new_class.Meta = attr_meta
return new_class
new_class._prepare()
new_class._meta.apps.register_model(new_class._meta.app_label, new_class)
return new_class
def copy_managers(cls, base_managers):
# This is in-place sorting of an Options attribute, but that's fine.
base_managers.sort()
for _, mgr_name, manager in base_managers: # NOQA (redefinition of _)
val = getattr(cls, mgr_name, None)
if not val or val is manager:
new_manager = manager._copy_to_model(cls)
cls.add_to_class(mgr_name, new_manager)
def add_to_class(cls, name, value):
# We should call the contribute_to_class method only if it's bound
if not inspect.isclass(value) and hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
def _prepare(cls):
"""
Creates some methods once self._meta has been populated.
"""
opts = cls._meta
opts._prepare(cls)
if opts.order_with_respect_to:
cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
# Defer creating accessors on the foreign class until it has been
# created and registered. If remote_field is None, we're ordering
# with respect to a GenericForeignKey and don't know what the
# foreign class is - we'll add those accessors later in
# contribute_to_class().
if opts.order_with_respect_to.remote_field:
wrt = opts.order_with_respect_to
remote = wrt.remote_field.model
lazy_related_operation(make_foreign_order_accessors, cls, remote)
# Give the class a docstring -- its definition.
if cls.__doc__ is None:
cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join(f.name for f in opts.fields))
get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get(opts.label_lower)
if get_absolute_url_override:
setattr(cls, 'get_absolute_url', get_absolute_url_override)
ensure_default_manager(cls)
signals.class_prepared.send(sender=cls)
class ModelState(object):
"""
A class for storing instance state
"""
def __init__(self, db=None):
self.db = db
# If true, uniqueness validation checks will consider this a new, as-yet-unsaved object.
# Necessary for correct validation of new instances of objects with explicit (non-auto) PKs.
# This impacts validation only; it has no effect on the actual save.
self.adding = True
class Model(six.with_metaclass(ModelBase)):
_deferred = False
def __init__(self, *args, **kwargs):
signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs)
# Set up the storage for instance state
self._state = ModelState()
# There is a rather weird disparity here; if kwargs, it's set, then args
# overrides it. It should be one or the other; don't duplicate the work
# The reason for the kwargs check is that standard iterator passes in by
# args, and instantiation for iteration is 33% faster.
args_len = len(args)
if args_len > len(self._meta.concrete_fields):
# Daft, but matches old exception sans the err msg.
raise IndexError("Number of args exceeds number of fields")
if not kwargs:
fields_iter = iter(self._meta.concrete_fields)
# The ordering of the zip calls matter - zip throws StopIteration
# when an iter throws it. So if the first iter throws it, the second
# is *not* consumed. We rely on this, so don't change the order
# without changing the logic.
for val, field in zip(args, fields_iter):
setattr(self, field.attname, val)
else:
# Slower, kwargs-ready version.
fields_iter = iter(self._meta.fields)
for val, field in zip(args, fields_iter):
setattr(self, field.attname, val)
kwargs.pop(field.name, None)
# Maintain compatibility with existing calls.
if isinstance(field.remote_field, ManyToOneRel):
kwargs.pop(field.attname, None)
# Now we're left with the unprocessed fields that *must* come from
# keywords, or default.
for field in fields_iter:
is_related_object = False
# This slightly odd construct is so that we can access any
# data-descriptor object (DeferredAttribute) without triggering its
# __get__ method.
if (field.attname not in kwargs and
(isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute)
or field.column is None)):
# This field will be populated on request.
continue
if kwargs:
if isinstance(field.remote_field, ForeignObjectRel):
try:
# Assume object instance was passed in.
rel_obj = kwargs.pop(field.name)
is_related_object = True
except KeyError:
try:
# Object instance wasn't passed in -- must be an ID.
val = kwargs.pop(field.attname)
except KeyError:
val = field.get_default()
else:
# Object instance was passed in. Special case: You can
# pass in "None" for related objects if it's allowed.
if rel_obj is None and field.null:
val = None
else:
try:
val = kwargs.pop(field.attname)
except KeyError:
# This is done with an exception rather than the
# default argument on pop because we don't want
# get_default() to be evaluated, and then not used.
# Refs #12057.
val = field.get_default()
else:
val = field.get_default()
if is_related_object:
# If we are passed a related instance, set it using the
# field.name instead of field.attname (e.g. "user" instead of
# "user_id") so that the object gets properly cached (and type
# checked) by the RelatedObjectDescriptor.
setattr(self, field.name, rel_obj)
else:
setattr(self, field.attname, val)
if kwargs:
for prop in list(kwargs):
try:
if isinstance(getattr(self.__class__, prop), property):
setattr(self, prop, kwargs.pop(prop))
except AttributeError:
pass
if kwargs:
raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0])
super(Model, self).__init__()
signals.post_init.send(sender=self.__class__, instance=self)
@classmethod
def from_db(cls, db, field_names, values):
if cls._deferred:
new = cls(**dict(zip(field_names, values)))
else:
new = cls(*values)
new._state.adding = False
new._state.db = db
return new
def __repr__(self):
try:
u = six.text_type(self)
except (UnicodeEncodeError, UnicodeDecodeError):
u = '[Bad Unicode data]'
return force_str('<%s: %s>' % (self.__class__.__name__, u))
def __str__(self):
if six.PY2 and hasattr(self, '__unicode__'):
return force_text(self).encode('utf-8')
return str('%s object' % self.__class__.__name__)
def __eq__(self, other):
if not isinstance(other, Model):
return False
if self._meta.concrete_model != other._meta.concrete_model:
return False
my_pk = self._get_pk_val()
if my_pk is None:
return self is other
return my_pk == other._get_pk_val()
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
if self._get_pk_val() is None:
raise TypeError("Model instances without primary key value are unhashable")
return hash(self._get_pk_val())
def __reduce__(self):
"""
Provides pickling support. Normally, this just dispatches to Python's
standard handling. However, for models with deferred field loading, we
need to do things manually, as they're dynamically created classes and
only module-level classes can be pickled by the default path.
"""
data = self.__dict__
data[DJANGO_VERSION_PICKLE_KEY] = get_version()
if not self._deferred:
class_id = self._meta.app_label, self._meta.object_name
return model_unpickle, (class_id, [], simple_class_factory), data
defers = []
for field in self._meta.fields:
if isinstance(self.__class__.__dict__.get(field.attname),
DeferredAttribute):
defers.append(field.attname)
model = self._meta.proxy_for_model
class_id = model._meta.app_label, model._meta.object_name
return (model_unpickle, (class_id, defers, deferred_class_factory), data)
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = ("Pickled model instance's Django version %s does"
" not match the current version %s."
% (pickled_version, current_version))
else:
msg = "Pickled model instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
def _get_pk_val(self, meta=None):
if not meta:
meta = self._meta
return getattr(self, meta.pk.attname)
def _set_pk_val(self, value):
return setattr(self, self._meta.pk.attname, value)
pk = property(_get_pk_val, _set_pk_val)
def get_deferred_fields(self):
"""
Returns a set containing names of deferred fields for this instance.
"""
return {
f.attname for f in self._meta.concrete_fields
if isinstance(self.__class__.__dict__.get(f.attname), DeferredAttribute)
}
def refresh_from_db(self, using=None, fields=None, **kwargs):
"""
Reloads field values from the database.
By default, the reloading happens from the database this instance was
loaded from, or by the read router if this instance wasn't loaded from
any database. The using parameter will override the default.
Fields can be used to specify which fields to reload. The fields
should be an iterable of field attnames. If fields is None, then
all non-deferred fields are reloaded.
When accessing deferred fields of an instance, the deferred loading
of the field will call this method.
"""
if fields is not None:
if len(fields) == 0:
return
if any(LOOKUP_SEP in f for f in fields):
raise ValueError(
'Found "%s" in fields argument. Relations and transforms '
'are not allowed in fields.' % LOOKUP_SEP)
db = using if using is not None else self._state.db
if self._deferred:
non_deferred_model = self._meta.proxy_for_model
else:
non_deferred_model = self.__class__
db_instance_qs = non_deferred_model._default_manager.using(db).filter(pk=self.pk)
# Use provided fields, if not set then reload all non-deferred fields.
if fields is not None:
fields = list(fields)
db_instance_qs = db_instance_qs.only(*fields)
elif self._deferred:
deferred_fields = self.get_deferred_fields()
fields = [f.attname for f in self._meta.concrete_fields
if f.attname not in deferred_fields]
db_instance_qs = db_instance_qs.only(*fields)
db_instance = db_instance_qs.get()
non_loaded_fields = db_instance.get_deferred_fields()
for field in self._meta.concrete_fields:
if field.attname in non_loaded_fields:
# This field wasn't refreshed - skip ahead.
continue
setattr(self, field.attname, getattr(db_instance, field.attname))
# Throw away stale foreign key references.
if field.is_relation and field.get_cache_name() in self.__dict__:
rel_instance = getattr(self, field.get_cache_name())
local_val = getattr(db_instance, field.attname)
related_val = None if rel_instance is None else getattr(rel_instance, field.target_field.attname)
if local_val != related_val or (local_val is None and related_val is None):
del self.__dict__[field.get_cache_name()]
self._state.db = db_instance._state.db
def serializable_value(self, field_name):
"""
Returns the value of the field name for this instance. If the field is
a foreign key, returns the id value, instead of the object. If there's
no Field object with this name on the model, the model attribute's
value is returned directly.
Used to serialize a field's value (in the serializer, or form output,
for example). Normally, you would just access the attribute directly
and not use this method.
"""
try:
field = self._meta.get_field(field_name)
except FieldDoesNotExist:
return getattr(self, field_name)
return getattr(self, field.attname)
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
"""
Saves the current instance. Override this in a subclass if you want to
control the saving process.
The 'force_insert' and 'force_update' parameters can be used to insist
that the "save" must be an SQL insert or update (or equivalent for
non-SQL backends), respectively. Normally, they should not be set.
"""
# Ensure that a model instance without a PK hasn't been assigned to
# a ForeignKey or OneToOneField on this model. If the field is
# nullable, allowing the save() would result in silent data loss.
for field in self._meta.concrete_fields:
if field.is_relation:
# If the related field isn't cached, then an instance hasn't
# been assigned and there's no need to worry about this check.
try:
getattr(self, field.get_cache_name())
except AttributeError:
continue
obj = getattr(self, field.name, None)
# A pk may have been assigned manually to a model instance not
# saved to the database (or auto-generated in a case like
# UUIDField), but we allow the save to proceed and rely on the
# database to raise an IntegrityError if applicable. If
# constraints aren't supported by the database, there's the
# unavoidable risk of data corruption.
if obj and obj.pk is None:
# Remove the object from a related instance cache.
if not field.remote_field.multiple:
delattr(obj, field.remote_field.get_cache_name())
raise ValueError(
"save() prohibited to prevent data loss due to "
"unsaved related object '%s'." % field.name
)
using = using or router.db_for_write(self.__class__, instance=self)
if force_insert and (force_update or update_fields):
raise ValueError("Cannot force both insert and updating in model saving.")
if update_fields is not None:
# If update_fields is empty, skip the save. We do also check for
# no-op saves later on for inheritance cases. This bailout is
# still needed for skipping signal sending.
if len(update_fields) == 0:
return
update_fields = frozenset(update_fields)
field_names = set()
for field in self._meta.fields:
if not field.primary_key:
field_names.add(field.name)
if field.name != field.attname:
field_names.add(field.attname)
non_model_fields = update_fields.difference(field_names)
if non_model_fields:
raise ValueError("The following fields do not exist in this "
"model or are m2m fields: %s"
% ', '.join(non_model_fields))
# If saving to the same database, and this model is deferred, then
# automatically do a "update_fields" save on the loaded fields.
elif not force_insert and self._deferred and using == self._state.db:
field_names = set()
for field in self._meta.concrete_fields:
if not field.primary_key and not hasattr(field, 'through'):
field_names.add(field.attname)
deferred_fields = [
f.attname for f in self._meta.fields
if (f.attname not in self.__dict__ and
isinstance(self.__class__.__dict__[f.attname], DeferredAttribute))
]
loaded_fields = field_names.difference(deferred_fields)
if loaded_fields:
update_fields = frozenset(loaded_fields)
self.save_base(using=using, force_insert=force_insert,
force_update=force_update, update_fields=update_fields)
save.alters_data = True
def save_base(self, raw=False, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Handles the parts of saving which should be done only once per save,
yet need to be done in raw saves, too. This includes some sanity
checks and signal sending.
The 'raw' argument is telling save_base not to save any parent
models and not to do any changes to the values before save. This
is used by fixture loading.
"""
using = using or router.db_for_write(self.__class__, instance=self)
assert not (force_insert and (force_update or update_fields))
assert update_fields is None or len(update_fields) > 0
cls = origin = self.__class__
# Skip proxies, but keep the origin as the proxy model.
if cls._meta.proxy:
cls = cls._meta.concrete_model
meta = cls._meta
if not meta.auto_created:
signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using,
update_fields=update_fields)
with transaction.atomic(using=using, savepoint=False):
if not raw:
self._save_parents(cls, using, update_fields)
updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields)
# Store the database on which the object was saved
self._state.db = using
# Once saved, this is no longer a to-be-added instance.
self._state.adding = False
# Signal that the save is complete
if not meta.auto_created:
signals.post_save.send(sender=origin, instance=self, created=(not updated),
update_fields=update_fields, raw=raw, using=using)
save_base.alters_data = True
def _save_parents(self, cls, using, update_fields):
"""
Saves all the parents of cls using values from self.
"""
meta = cls._meta
for parent, field in meta.parents.items():
# Make sure the link fields are synced between parent and self.
if (field and getattr(self, parent._meta.pk.attname) is None
and getattr(self, field.attname) is not None):
setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
self._save_parents(cls=parent, using=using, update_fields=update_fields)
self._save_table(cls=parent, using=using, update_fields=update_fields)
# Set the parent's PK value to self.
if field:
setattr(self, field.attname, self._get_pk_val(parent._meta))
# Since we didn't have an instance of the parent handy set
# attname directly, bypassing the descriptor. Invalidate
# the related object cache, in case it's been accidentally
# populated. A fresh instance will be re-built from the
# database if necessary.
cache_name = field.get_cache_name()
if hasattr(self, cache_name):
delattr(self, cache_name)
def _save_table(self, raw=False, cls=None, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Does the heavy-lifting involved in saving. Updates or inserts the data
for a single table.
"""
meta = cls._meta
non_pks = [f for f in meta.local_concrete_fields if not f.primary_key]
if update_fields:
non_pks = [f for f in non_pks
if f.name in update_fields or f.attname in update_fields]
pk_val = self._get_pk_val(meta)
if pk_val is None:
pk_val = meta.pk.get_pk_value_on_save(self)
setattr(self, meta.pk.attname, pk_val)
pk_set = pk_val is not None
if not pk_set and (force_update or update_fields):
raise ValueError("Cannot force an update in save() with no primary key.")
updated = False
# If possible, try an UPDATE. If that doesn't update anything, do an INSERT.
if pk_set and not force_insert:
base_qs = cls._base_manager.using(using)
values = [(f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False)))
for f in non_pks]
forced_update = update_fields or force_update
updated = self._do_update(base_qs, using, pk_val, values, update_fields,
forced_update)
if force_update and not updated:
raise DatabaseError("Forced update did not affect any rows.")
if update_fields and not updated:
raise DatabaseError("Save with update_fields did not affect any rows.")
if not updated:
if meta.order_with_respect_to:
# If this is a model with an order_with_respect_to
# autopopulate the _order field
field = meta.order_with_respect_to
filter_args = field.get_filter_kwargs_for_object(self)
order_value = cls._base_manager.using(using).filter(**filter_args).count()
self._order = order_value
fields = meta.local_concrete_fields
if not pk_set:
fields = [f for f in fields if not isinstance(f, AutoField)]
update_pk = bool(meta.has_auto_field and not pk_set)
result = self._do_insert(cls._base_manager, using, fields, update_pk, raw)
if update_pk:
setattr(self, meta.pk.attname, result)
return updated
def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update):
"""
This method will try to update the model. If the model was updated (in
the sense that an update query was done and a matching row was found
from the DB) the method will return True.
"""
filtered = base_qs.filter(pk=pk_val)
if not values:
# We can end up here when saving a model in inheritance chain where
# update_fields doesn't target any field in current model. In that
# case we just say the update succeeded. Another case ending up here
# is a model with just PK - in that case check that the PK still
# exists.
return update_fields is not None or filtered.exists()
if self._meta.select_on_save and not forced_update:
if filtered.exists():
# It may happen that the object is deleted from the DB right after
# this check, causing the subsequent UPDATE to return zero matching
# rows. The same result can occur in some rare cases when the
# database returns zero despite the UPDATE being executed
# successfully (a row is matched and updated). In order to
# distinguish these two cases, the object's existence in the
# database is again checked for if the UPDATE query returns 0.
return filtered._update(values) > 0 or filtered.exists()
else:
return False
return filtered._update(values) > 0
def _do_insert(self, manager, using, fields, update_pk, raw):
"""
Do an INSERT. If update_pk is defined then this method should return
the new pk for the model.
"""
return manager._insert([self], fields=fields, return_id=update_pk,
using=using, raw=raw)
def delete(self, using=None, keep_parents=False):
using = using or router.db_for_write(self.__class__, instance=self)
assert self._get_pk_val() is not None, (
"%s object can't be deleted because its %s attribute is set to None." %
(self._meta.object_name, self._meta.pk.attname)
)
collector = Collector(using=using)
collector.collect([self], keep_parents=keep_parents)
return collector.delete()
delete.alters_data = True
def _get_FIELD_display(self, field):
value = getattr(self, field.attname)
return force_text(dict(field.flatchoices).get(value, value), strings_only=True)
def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
if not self.pk:
raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
op = 'gt' if is_next else 'lt'
order = '' if is_next else '-'
param = force_text(getattr(self, field.attname))
q = Q(**{'%s__%s' % (field.name, op): param})
q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk})
qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by(
'%s%s' % (order, field.name), '%spk' % order
)
try:
return qs[0]
except IndexError:
raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name)
def _get_next_or_previous_in_order(self, is_next):
cachename = "__%s_order_cache" % is_next
if not hasattr(self, cachename):
op = 'gt' if is_next else 'lt'
order = '_order' if is_next else '-_order'
order_field = self._meta.order_with_respect_to
filter_args = order_field.get_filter_kwargs_for_object(self)
obj = self._default_manager.filter(**filter_args).filter(**{
'_order__%s' % op: self._default_manager.values('_order').filter(**{
self._meta.pk.name: self.pk
})
}).order_by(order)[:1].get()
setattr(self, cachename, obj)
return getattr(self, cachename)
def prepare_database_save(self, field):
if self.pk is None:
raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self)
return getattr(self, field.remote_field.get_related_field().attname)
def clean(self):
"""
Hook for doing any extra model-wide validation after clean() has been
called on every field by self.clean_fields. Any ValidationError raised
by this method will not be associated with a particular field; it will
have a special-case association with the field defined by NON_FIELD_ERRORS.
"""
pass
def validate_unique(self, exclude=None):
"""
Checks unique constraints on the model and raises ``ValidationError``
if any failed.
"""
unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
errors = self._perform_unique_checks(unique_checks)
date_errors = self._perform_date_checks(date_checks)
for k, v in date_errors.items():
errors.setdefault(k, []).extend(v)
if errors:
raise ValidationError(errors)
def _get_unique_checks(self, exclude=None):
"""
Gather a list of checks to perform. Since validate_unique could be
called from a ModelForm, some fields may have been excluded; we can't
perform a unique check on a model that is missing fields involved
in that check.
Fields that did not validate should also be excluded, but they need
to be passed in via the exclude argument.
"""
if exclude is None:
exclude = []
unique_checks = []
unique_togethers = [(self.__class__, self._meta.unique_together)]
for parent_class in self._meta.get_parent_list():
if parent_class._meta.unique_together:
unique_togethers.append((parent_class, parent_class._meta.unique_together))
for model_class, unique_together in unique_togethers:
for check in unique_together:
for name in check:
# If this is an excluded field, don't add this check.
if name in exclude:
break
else:
unique_checks.append((model_class, tuple(check)))
# These are checks for the unique_for_<date/year/month>.
date_checks = []
# Gather a list of checks for fields declared as unique and add them to
# the list of checks.
fields_with_class = [(self.__class__, self._meta.local_fields)]
for parent_class in self._meta.get_parent_list():
fields_with_class.append((parent_class, parent_class._meta.local_fields))
for model_class, fields in fields_with_class:
for f in fields:
name = f.name
if name in exclude:
continue
if f.unique:
unique_checks.append((model_class, (name,)))
if f.unique_for_date and f.unique_for_date not in exclude:
date_checks.append((model_class, 'date', name, f.unique_for_date))
if f.unique_for_year and f.unique_for_year not in exclude:
date_checks.append((model_class, 'year', name, f.unique_for_year))
if f.unique_for_month and f.unique_for_month not in exclude:
date_checks.append((model_class, 'month', name, f.unique_for_month))
return unique_checks, date_checks
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
# Try to look up an existing object with the same values as this
# object's values for all the unique field.
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
if lookup_value is None:
# no value, skip the lookup
continue
if f.primary_key and not self._state.adding:
# no need to check for unique primary key when editing
continue
lookup_kwargs[str(field_name)] = lookup_value
# some fields were skipped, no reason to do the check
if len(unique_check) != len(lookup_kwargs):
continue
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
# Note that we need to use the pk as defined by model_class, not
# self.pk. These can be different fields because model inheritance
# allows single model to have effectively multiple primary keys.
# Refs #17615.
model_class_pk = self._get_pk_val(model_class._meta)
if not self._state.adding and model_class_pk is not None:
qs = qs.exclude(pk=model_class_pk)
if qs.exists():
if len(unique_check) == 1:
key = unique_check[0]
else:
key = NON_FIELD_ERRORS
errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
return errors
def _perform_date_checks(self, date_checks):
errors = {}
for model_class, lookup_type, field, unique_for in date_checks:
lookup_kwargs = {}
# there's a ticket to add a date lookup, we can remove this special
            # case if that makes its way in
date = getattr(self, unique_for)
if date is None:
continue
if lookup_type == 'date':
lookup_kwargs['%s__day' % unique_for] = date.day
lookup_kwargs['%s__month' % unique_for] = date.month
lookup_kwargs['%s__year' % unique_for] = date.year
else:
lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
lookup_kwargs[field] = getattr(self, field)
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
if not self._state.adding and self.pk is not None:
qs = qs.exclude(pk=self.pk)
if qs.exists():
errors.setdefault(field, []).append(
self.date_error_message(lookup_type, field, unique_for)
)
return errors
def date_error_message(self, lookup_type, field_name, unique_for):
opts = self._meta
field = opts.get_field(field_name)
return ValidationError(
message=field.error_messages['unique_for_date'],
code='unique_for_date',
params={
'model': self,
'model_name': six.text_type(capfirst(opts.verbose_name)),
'lookup_type': lookup_type,
'field': field_name,
'field_label': six.text_type(capfirst(field.verbose_name)),
'date_field': unique_for,
'date_field_label': six.text_type(capfirst(opts.get_field(unique_for).verbose_name)),
}
)
def unique_error_message(self, model_class, unique_check):
opts = model_class._meta
params = {
'model': self,
'model_class': model_class,
'model_name': six.text_type(capfirst(opts.verbose_name)),
'unique_check': unique_check,
}
# A unique field
if len(unique_check) == 1:
            field = opts.get_field(unique_check[0])
# Run optimization of radii and gamma factors to minimize differences between deltaGcalc and deltaGexp
# INPUT:
# > experimental deltaG
# > template geometries in MOPAC input format, previously optimized with the method to test
# > initial radii values in a file
from commonfunctions import * # common function
from commonparameters import * # common parameters
import numpy as np
import scipy.optimize
import subprocess
from itertools import count
import ConfigParser # import setting file
from optparse import OptionParser
import sys, os, time
from subprocess import Popen, list2cmdline
import glob
import time #check time of calculation
import datetime
import shutil
import math
import ga_interface
from copy import deepcopy #to deepcopy dictionaries
from pylab import *
#~ import minimize_interface
###########
# PARSE
###########
parser = OptionParser()
parser.add_option("-o", "--outfile", dest="outfilename", default=DEFREPORFILE,
help="name of file to write REPORT", metavar="REPORT")
parser.add_option("-c", "--config", dest="configfile", default=CONFIG_FILENAME,
help="FILE to setting the program", metavar="CONFIG")
parser.add_option("-i", "--initial", dest="paramfile", default=DEFPARAMFILE,
help="FILE to read values of params", metavar="PARAMETERS")
parser.add_option("-r", "--ref", dest="reffilename", default=DEFREFFILE,
help="FILE to read values to comparate", metavar="FILE")
parser.add_option("-g", "--gasdir", dest="gasdir", default=DEFGASDIR,
help="GASDIR directory where take the gas calculation", metavar="GASDIR")
parser.add_option("-t", "--templatedir", dest="templatedir", default=DEFTEMPLATEDIR,
help="TEMPLATEDIR directory where take the template files", metavar="TEMPLATEDIR")
(options, args) = parser.parse_args()
outfilename = options.outfilename
paramfile = options.paramfile
reffilename = options.reffilename
gasdir = options.gasdir
templatedir = options.templatedir
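# Typical invocation (illustrative; the file and directory names here are hypothetical):
#   python optimize_params.py -c config.in -i initial_params.txt -r experimental_dg.ref \
#          -g gasdir -t templates -o report.out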
cfg = ConfigParser.ConfigParser()
if not cfg.read([options.configfile]):
print "No existe el archivo"
###########
# VARS
###########
varlist = [] #store names of vars to print later
mcmarklist = [] #store the most recent mcmarks, used to control the temperature in the MC run
#limits
if cfg.has_option("limits", "radiilimit"):
radiilimit = float(cfg.get("limits", "radiilimit"))
else:
radiilimit = 0.4 #fix maximum displacement from initial values
varlist.append("radiilimit")
if cfg.has_option("limits", "radiirange"):
radiirange = float(cfg.get("limits", "radiirange"))
else:
radiirange = 0.2 #range of radii
varlist.append("radiirange")
if cfg.has_option("limits", "cosmoradiilimit"):
cosmoradiilimit = float(cfg.get("limits", "cosmoradiilimit"))
else:
    cosmoradiilimit = 0.2 #fix maximum displacement from initial values of cosmo radii. only for internal calculation of MOPAC
varlist.append("cosmoradiilimit")
if cfg.has_option("limits", "cosmoradiirange"):
cosmoradiirange = float(cfg.get("limits", "cosmoradiirange"))
else:
cosmoradiirange = 0.1 #range of cosmo radii
varlist.append("cosmoradiirange")
if cfg.has_option("limits", "gammalimit"):
gammalimit = float(cfg.get("limits", "gammalimit"))
else:
gammalimit = 0.099 #fix maximum displacement from initial values
varlist.append("gammalimit")
if cfg.has_option("limits", "gammarange"):
gammarange = float(cfg.get("limits", "gammarange"))
else:
gammarange = 0.05 #range of gamma
varlist.append("gammarange")
if cfg.has_option("limits", "rsolvlimit"):
rsolvlimit = float(cfg.get("limits", "rsolvlimit"))
else:
rsolvlimit = 0.2 #fix maximum displacement from initial values
varlist.append("rsolvlimit")
if cfg.has_option("limits", "rsolvrange"):
rsolvrange = float(cfg.get("limits", "rsolvrange"))
else:
rsolvrange = 0.1 #range of solvent radii
varlist.append("rsolvrange")
if cfg.has_option("limits", "eumbrallimit"):
eumbrallimit = float(cfg.get("limits", "eumbrallimit"))
else:
eumbrallimit = 0.1
varlist.append("eumbrallimit")
if cfg.has_option("limits", "eumbralrange"):
eumbralrange = float(cfg.get("limits", "eumbralrange"))
else:
eumbralrange = 0.05
varlist.append("eumbralrange")
if cfg.has_option("limits", "krange"):
krange = float(cfg.get("limits", "krange"))
else:
krange = 0.5
varlist.append("krange")
if cfg.has_option("limits", "klimit"):
klimit = float(cfg.get("limits", "klimit"))
else:
klimit = 1.0
varlist.append("klimit")
#method
if cfg.has_option("method", "extrakeys"):
extrakeys =cfg.get("method", "extrakeys")
else:
extrakeys = "PM6 PRECISE 1SCF"
varlist.append("extrakeys")
if cfg.has_option("method", "extrakeyssolv"):
extrakeyssolv = cfg.get("method", "extrakeyssolv")
else:
extrakeyssolv = "EPS=78 COSWRT"
varlist.append("extrakeyssolv")
if cfg.has_option("method", "templateext"):
templateext = cfg.get("method", "templateext")
else:
templateext = DEFTEMPLATEEXT
varlist.append("templateext")
#system
if cfg.has_option("system", "calculationtype"):
calculationtype = cfg.get("system", "calculationtype")
else:
calculationtype="mc"
varlist.append("calculationtype")
# genetic algorithm parameters
if cfg.has_option("system", "numbermembers"):
numbermembers = int(cfg.get("system", "numbermembers"))
else:
numbermembers = 20
varlist.append("numbermembers")
if cfg.has_option("system", "maxgen"):
maxgen = int(cfg.get("system", "maxgen"))
else:
maxgen = 100
varlist.append("maxgen")
if cfg.has_option("system", "onlyneutral"):
onlyneutral = cfg.getboolean("system", "onlyneutral")
else:
onlyneutral=True
varlist.append("onlyneutral")
if cfg.has_option("system", "cyclicoptimization"):
cyclicoptimization = cfg.getboolean("system", "cyclicoptimization")
else:
cyclicoptimization = True #sequence of radii and gamma optimization
varlist.append("cyclicoptimization")
if cfg.has_option("system", "maxiter"):
maxiter = int(cfg.get("system", "maxiter"))
else:
maxiter = 20000 #maximum number of iterations in the minimization
varlist.append("maxiter")
if cfg.has_option("system", "nptype"):
nptype = cfg.get("system", "nptype")
else:
nptype = "claverie" #"electronmod" #"claverie"
varlist.append("nptype")
if cfg.has_option("system", "excludeatomlist"):
excludeatomlist = cfg.get("system", "excludeatomlist").split()
varlist.append("excludeatomlist")
if cfg.has_option("system", "temperature"):
temperature = float(cfg.get("system", "temperature"))
else:
temperature = 0.10 #temperature of Monte Carlo simulation
varlist.append("temperature")
if cfg.has_option("system", "rangeslope"):
rangeslope = float(cfg.get("system", "rangeslope"))
else:
    rangeslope = 0.995 #rate at which the sampling range shrinks
varlist.append("rangeslope")
if cfg.has_option("system", "fixlist"):
fixlist = cfg.get("system", "fixlist").split()
varlist.append("fixlist")
#set the formats of optimization, step1 p1 p2 p3 pn N, where p1...pn are parameters, and N the number of cycles.
stepslist = []
try:
for n in range(1,1000): #max of 1000 optimization step formats
if cfg.has_option("system", "step" + str(n)):
stepall = cfg.get("system", "step"+str(n)).split()
stepcycles = int(stepall.pop())
stepslist.append([stepall,stepcycles]) #store: parameters, number of cycles
except:
if len(stepslist)==0:
print "ERROR: You must define step settings in config.in, eg. step1 = p1 p2 p3 100 where p1, p2, p3 are parameters to optimize, and 100 the number of cycles per step."
exit()
else:
varlist.append("stepslist")
#~ limitdic={"radii":radiilimit, "gamma":gammalimit, "rsolv":rsolvlimit} #limits determine fix extremes of values center in "initial" values.
rangesdic = {"radii":radiirange, "gamma":gammarange, "rsolv":rsolvrange, "eumbral":eumbralrange, "cosmoradii":cosmoradiirange, "k":krange} #moveable ranges, centers are "current" values in MC run
###########
# PROGRAM
###########
# NOTATION SUFFIX:
# 0: initial
# test: value in current step
# best: the lowest totalerror step
# current: store last trajectory values of MC run
# to debug, copy the source code to the working directory
os.system("cp " + __file__ + " source_code.py")
#initial time
start_time = time.time()
#initialize report file
#outfile = open(outfilename, "w",0)
outfile = open(outfilename, "w")
#read initial parameter values to adjust
param0dic = parameters_read(paramfile)
#read reference values to compare against
datadic = exp_read(reffilename)
if onlyneutral==True:
datadic = {your_key: datadic[your_key] for your_key in datadic if "anion" not in your_key }
datadic = {your_key: datadic[your_key] for your_key in datadic if "protonated" not in your_key }
#remove compounds that contain an excluded element from the data and template lists
try:
excludeatomlist
except NameError:
pass
else:
newdatadic = {}
for key, values in datadic.iteritems():
if check_excludeatom(templatedir + "/" + key + templateext,excludeatomlist)==False:
newdatadic[key]=datadic[key]
else:
print "Exclude:" + key
datadic = newdatadic
fixlimitdic={}
minlimitdic={}
maxlimitdic={}
for key, value in param0dic.iteritems():
if "r@" in key:
limitvalue = radiilimit
elif "g@" in key:
limitvalue = gammalimit
elif "rsolv@" in key:
limitvalue = rsolvlimit
elif "e@" in key: #number of electrons umbral
limitvalue = eumbrallimit
elif "rc@" in key: #cosmo radius
limitvalue = cosmoradiilimit
elif "k@" in key: #constant multiplica radii
limitvalue = klimit
else:
limitvalue = 0
fixlimitdic[key]=[value-limitvalue, value+limitvalue]
minlimitdic[key]=value-limitvalue
maxlimitdic[key]=value+limitvalue
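#Hard limits vs. move ranges (illustrative numbers): with radiilimit = 0.4, a radius that
#starts at 1.50 is confined to [1.10, 1.90] for the whole run (fixlimitdic is centred on the
#*initial* value), whereas radiirange in rangesdic sets the size of each individual move,
#which is centred on the *current* value.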
#make GAS phase calculation
for key, value in datadic.iteritems():
datadic[key]["template"]=templatedir + "/" + key + templateext
inputfile_make(value["template"],extrakeys)
#run GAS phase calculation
gasinputlist = list_files(gasdir)
commands = []
for gasinput in gasinputlist:
commands.append([MOPACPATH, gasinput])
exec_commands(commands)
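#exec_commands (from commonfunctions) runs each [MOPACPATH, inputfile] pair, presumably in
#parallel; the gas-phase heats of formation parsed below are computed once here so that only
#the solvated calculations need to be repeated during the optimization.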
#extract GAS Heat of Formation
for key, values in datadic.iteritems():
datadic[key]["hofgas"],datadic[key]["cosmoarea"] = mopacout_read(gasdir + "/" + key + templateext.replace(".mop","_gas.out")) #extract HOF
#head of report file
outfile.write(str(datetime.datetime.now())+"\n")
#write settings in report file
for name in varlist:
try:
outfile.write(name + "\t\t=\t" + str(eval(name)) + "\n")
except:
pass
outfile.write("\nKEYS:" + extrakeys + " SOLVENT:" + extrakeyssolv +"\n")
outfile.write("REFERENCES:\n")
for key, value in datadic.iteritems():
outfile.write(key + ":" + str(value["dgexp"]) + "\n")
outfile.write(20*"=" + "\n")
if cyclicoptimization == True:
period = 0
maxcyclebystep = stepslist[period][1] # number of cycle by step #add to fix GA
freeparamlist = stepslist[period][0] #add to fix GA
else:
freeparamlist=[]
for value,key in param0dic.iteritems():
freeparamlist.append(value)
rsolvtest = param0dic["rsolv@"] #take rsolv parameter defined in input parameter file
if nptype=="claverie" or nptype=="claverietype":
#~ yrel0 = 4*PI*NS*(rsolvtest*1.0E-10)*(rsolvtest*1.0E-10)*(rsolvtest*1.0E-10)/3.0 # ratio to use in claverie cavitation term
yrel0 = 4*PI*NS*rsolvtest*rsolvtest*rsolvtest/3.0 # ratio to use in claverie cavitation term
else:
yrel0 = 0
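# yrel is the solvent packing fraction ("reduced volume") used by the Claverie / scaled-
# particle-theory cavitation term: y = 4*pi*NS*rsolv**3/3, with NS the solvent number density
# (from commonparameters) in units consistent with rsolv. A minimal sketch of the same
# expression as a reusable helper (illustrative only, never called by this script):
def packing_fraction(rsolv, number_density=NS):
    """Return the reduced volume y = 4/3 * pi * NS * rsolv^3 used in the cavitation term."""
    return 4.0 * PI * number_density * rsolv * rsolv * rsolv / 3.0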
#calculate initial value
mintotalerror, mae, rmse, bias, r2, slope, intercept, datadic = calc_error("0000", param0dic, datadic, extrakeys, extrakeyssolv + " RSOLV=% .3f" % (rsolvtest), outfile, nptype, yrel0)
outfile.write("\n")
# write summary file with initial values for first step
print_summary(mintotalerror, mae, rmse, bias, r2, slope, intercept,0,datadic,nptype)
paramtestdic = param0dic #copy initial parameter values to a new dictionary (test) to be modified
beststep = [mintotalerror, paramtestdic, datadic] # first element: minimum error, second element: list of values of best step
currentstep = [mintotalerror, paramtestdic, datadic] # first element: minimum error, second element: list of values of current step
#Monte Carlo run
if calculationtype == 'mc':
acumcycle = 0 # accumulate the number of cycles
for ncycle in range(1,maxiter):
mcmark = "\n"
        #step number, used to generate file names
numberstep = next(step)
if cyclicoptimization == True:
maxcyclebystep = stepslist[period][1] # number of cycle by step
freeparamlist = stepslist[period][0]
if acumcycle >= maxcyclebystep:
acumcycle = 0 # reset count
if period == len(stepslist)-1:
period = 0 #restart to first step
else:
period += 1 #next step
paramtestdic = beststep[1]
datadic = beststep[2]
print period
#check restrictions, if not assign parameters again
tag1sttry = True # the first time into while loop
while (check_restrictions(paramtestdic,fixlimitdic)==0) or (tag1sttry == True):
tag1sttry = False # first time into while loop
            # generate new parameter values from a Gaussian probability distribution centred on the current values
paramtestdic = modified_values(currentstep[1], freeparamlist, rangesdic)
            # multiply the initial radii by a constant k, which is being optimized.
try:
if paramtestdic["k@"]:
for key, value in paramtestdic.iteritems():
if "rc@" in key:
paramtestdic[key] = param0dic[key] * paramtestdic["k@"]
except:
pass
#fix values
try:
for key in fixlist:
paramtestdic[key]=param0dic[key]
except:
pass
rsolvtest = paramtestdic["rsolv@"] #take rsolv parameter
if nptype=="claverie" or nptype=="claverietype":
#~ yrel = 4*PI*NS*(rsolvtest*1.0E-10)*(rsolvtest*1.0E-10)*(rsolvtest*1.0E-10)/3.0 # ratio to use in claverie cavitation term
yrel = 4*PI*NS*rsolvtest*rsolvtest*rsolvtest/3.0 # ratio to use in claverie cavitation term
else:
yrel = 0
#=================================================================================================
        # check whether the new error value is lower than the best value stored in beststep[0]
totalerror, mae, rmse, bias, r2, slope, intercept, datadic = calc_error(numberstep, paramtestdic, datadic, extrakeys, extrakeyssolv + " RSOLV=% .3f" % (rsolvtest), outfile, nptype, yrel)
if totalerror < mintotalerror:
mintotalerror = totalerror
# write summary file with the low totalerror step
print_summary(mintotalerror, mae, rmse, bias, r2, slope, intercept, ncycle,datadic,nptype)
# write parameter values
paramout = open("list_param.out","w")
for key, value in sorted(paramtestdic.iteritems()):
paramout.write("%-3s \t %.3f\n" % (key,value))
paramout.close()
# ====================================================================
#~ else:
#~ shutil.rmtree(numberstep)
shutil.rmtree(numberstep) # erase current directory
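        # Metropolis acceptance (descriptive note): a step is kept if it lowers the error
        # relative to the current trajectory point, or otherwise with probability
        # exp(-(totalerror - currentstep[0]) / temperature), so higher temperatures admit
        # more uphill moves.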
prob = np.random.rand()
if totalerror < currentstep[0] or prob < math.exp(-(totalerror-currentstep[0])/temperature):
mcmark = " @PROB\n"# mark that indicates this is a step selected by Monte Carlo because decent in energy
# "current" vars store last trajectory values of MC run.
currentstep = [totalerror,paramtestdic,datadic]
if totalerror < beststep[0]:
mcmark = " @DESC\n" # change mark that indicates this is a step selected by Monte Carlo because decent in energy
beststep = [totalerror,paramtestdic,datadic]
outfile.write(mcmark) #end of line with a mark, @DESC: recenter because below value of function, @PROB: select from probability
        #scale the temperature according to how often the @PROB mark appears
        #mcmarklist stores the n most recent marks
temperature, mcmarklist = temperature_control(temperature,mcmarklist,mcmark)
print temperature
print mcmarklist
print "PASO:" + str(ncycle)
acumcycle += 1
#Genetic Algorithm run
elif calculationtype == 'ga':
acumcycle = 0 # accumulate the number of cycles
memberlist = [] #list that store members (list of paramtestdic)
#building parents
for n in range(0,numbermembers):
#check restrictions, if not assign parameters again
tag1sttry = True # the first time into while loop
while (check_restrictions(paramtestdic,fixlimitdic)==0) or (tag1sttry == True):
tag1sttry = False # first time into while loop
            # generate new parameter values from a probability distribution centred on the current values
paramtestdic = modified_values(currentstep[1], freeparamlist, rangesdic)
            # multiply the initial radii by a constant k, which is being optimized.
try:
if paramtestdic["k@"]:
for key, value in paramtestdic.iteritems():
if "rc@" in key:
paramtestdic[key] = param0dic[key] * paramtestdic["k@"]
except:
pass
#fix values
try:
for key in fixlist:
paramtestdic[key]=param0dic[key]
except:
pass
memberlist.append(ga_interface.memberobj(paramtestdic))
#generations
for ncycle in range(1,maxgen):
if cyclicoptimization == True:
maxcyclebystep = stepslist[period][1] # number of cycle by step
freeparamlist = stepslist[period][0]
if acumcycle >= maxcyclebystep:
acumcycle = 0 # reset count
if period == len(stepslist)-1:
period = 0 #restart to first step
else:
period += 1 #next step
paramtestdic = beststep[1]
datadic = beststep[2]
print period
        #step number, used to generate file names
#~ numberstep = next(step)
numberstep = "0000"
#fitness calculation
fitnesslist = [] #store fitness
for member in memberlist:
paramtestdic = member.paramdic
rsolvtest = paramtestdic["rsolv@"] #take rsolv parameter
if nptype=="claverie" or nptype=="claverietype":
yrel = 4*PI*NS*rsolvtest*rsolvtest*rsolvtest/3.0 # ratio to use in claverie cavitation term
else:
yrel = 0
#=================================================================================================
            # check whether the new error value is lower than the best value stored in beststep[0]
if member.fitness == None:
totalerror, mae, rmse, bias, r2, slope, intercept, datadic = calc_error(numberstep, paramtestdic, datadic, extrakeys, extrakeyssolv + " RSOLV=% .3f" % (rsolvtest), outfile, nptype, yrel)
if totalerror < mintotalerror:
mintotalerror = totalerror
# write summary file with the low totalerror step
print_summary(mintotalerror, mae, rmse, bias, r2, slope, intercept, ncycle,datadic,nptype)
# write parameter values
paramout = open("list_param.out","w")
for key, value in sorted(paramtestdic.iteritems()):
paramout.write("%-3s \t %.4f\n" % (key,value))
paramout.close()
# ====================================================================
#~ else:
#~ shutil.rmtree(numberstep)
shutil.rmtree(numberstep) # erase current directory
fitness = 1.0/(totalerror*totalerror*totalerror*totalerror)
fitnesslist.append(fitness)
member.set_fitness(fitness)
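                # Fitness is the inverse fourth power of the error, so a member with half the
                # error of another is 2**4 = 16 times fitter; this sharpens the selection
                # pressure towards low-error parameter sets.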
else:
fitnesslist.append(member.fitness)
            #write all individuals in the report file
            # FIXME: totalerror, mae, rmse, bias, r2 may have been computed for a different member
prevline = "%-5s CALCULATED Err: %3.4f " % (numberstep, 1/member.fitness**4)
outfile.write(prevline + print_param(paramtestdic))
outfile.write("\n")
genetic = ga_interface.SetsGeneration(memberlist, minlimitdic, maxlimitdic)
memberlist = genetic.next()
outfile.write("\n") #end of line with a mark
acumcycle += 1
print "PASO: %i %.3f"%(ncycle,np.mean(fitnesslist))
salida = ""
for fitn in sorted(fitnesslist):
salida += " %.3f"%(fitn)
print salida
# Minimize Algorithm run
elif calculationtype == 'minimize':
from lmfit import minimize, Parameters, Parameter, report_fit, Minimizer
acumcycle = 0
limitcycle = 100
cycleswithoutdown = 0
maxcycleminimization = 200
def fcn2min(params, extrakeys, extrakeyssolv, outfile, nptype, datacompoundnamelist):
global mintotalerror
global acumcycle
global datadic
global cycleswithoutdown
global limitcycle
global maxcycleminimization
#rebuild paramdic
paramdic={}
for key, values in params.iteritems():
key = key.replace("zzz","@") #change to original key formats
key = key.replace("xxx",".")
paramdic[key]=values.value
rsolvtest = paramdic["rsolv@"]
if nptype=="claverie" or nptype=="claverietype":
yrel = 4*PI*NS*rsolvtest*rsolvtest*rsolvtest/3.0 # ratio to use in claverie cavitation term
else:
yrel = 0
totalerror, mae, rmse, bias, r2, slope, intercept, datadic = calc_error("0000", paramdic, datadic, extrakeys, extrakeyssolv + " RSOLV=% .3f" % (rsolvtest), outfile, nptype, yrel)
if totalerror < mintotalerror:
cycleswithoutdown = 0
mintotalerror = totalerror
# write summary file with the low totalerror step
print_summary(mintotalerror, mae, rmse, bias, r2, slope, intercept, acumcycle,datadic,nptype)
# write parameter values
paramout = open("list_param.out","w")
for key, value in sorted(paramdic.iteritems()):
print "%-3s \t %.5f\n" % (key,value)
paramout.write("%-3s \t %.5f\n" % (key,value))
paramout.close()
else:
cycleswithoutdown += 1
if cycleswithoutdown > limitcycle:
return #exit function with error
#~ shutil.rmtree(numberstep)
shutil.rmtree("0000") # erase current directory
print "PASO %i: %f"%(acumcycle,totalerror)
acumcycle += 1
errors = []
for i in range(0,len(datacompoundnamelist)):
errors.append(datadic[datacompoundnamelist[i]]['dgexp']-datadic[datacompoundnamelist[i]]['dgcalc'])
#~ return errors
if acumcycle >= maxcycleminimization:
return
return totalerror
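    # Note: lmfit's scalar minimizers (e.g. method='powell' below) accept a single scalar
    # objective value; a least-squares method such as 'leastsq' would instead need the
    # per-compound residual array (the commented-out `errors` list above).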
params = Parameters()
for ikey, ivalue in param0dic.iteritems():
maxlimit = maxlimitdic[ikey]
minlimit = minlimitdic[ikey]
ikey = ikey.replace("@","zzz") #replace @ with zzz because this character is not supported by lmfit library
ikey = ikey.replace(".","xxx") #replace . with xxx because this character is not supported by lmfit library
#~ params.add(ikey, value=ivalue)
if "rsolv" in ikey:
params.add(ikey, ivalue, False)
else:
#~ params.add(ikey, ivalue, False)
params.add(ikey, value=ivalue, min=minlimit, max=maxlimit)
print params
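    # Example of the name mapping (hypothetical keys): a script parameter "r@C" becomes the
    # lmfit Parameter name "rzzzC", and "rc@N.3" becomes "rczzzNxxx3"; fcn2min reverses the
    # substitution before rebuilding the parameter dictionary.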
#experimental data
datacompoundnamelist = []
for ikey, ivalue in datadic.iteritems():
        datacompoundnamelist.append(ikey) #compound names, used inside the error function to index datadic
#~ extra_kwargs={}
#~ extra_kwargs['epsfcn'] = 0.5
#~ result = minimize(fcn2min, params, args=(extrakeys, extrakeyssolv, rsolvtest, outfile, nptype, datacompoundnamelist), method='lbfgs', **extra_kwargs)
#~ extra_kwargs={}
#~ extra_kwargs['T'] = 300.0
#~ extra_kwargs['stepsize']=0.1
#~ result = minimize(fcn2min, params, args=(extrakeys, extrakeyssolv, rsolvtest, outfile, nptype, datacompoundnamelist), method='basinhopping', **extra_kwargs)
options = {}
options["maxiter"] = 3
kws = {}
kws["options"]=options
print kws
#~ myfit = Minimizer(fcn2min, params, fcn_args=(extrakeys, extrakeyssolv, outfile, nptype, datacompoundnamelist), maxfun=5, **options)
#~ myfit.prepare_fit()
#~ init = my_residual(p_fit, x)
#~ pylab.plot(x, init, 'b--')
#~ myfit.fmin()
try:
result = minimize(fcn2min, params, args=(extrakeys, extrakeyssolv, outfile, nptype, datacompoundnamelist), method='powell')
except:
pass
#~ result = minimize(fcn2min, params, args=(extrakeys, extrakeyssolv, outfile, nptype, datacompoundnamelist), method='powell', **kws)
#~ #calculate final result
#~ final = data + result.residual
# write error report
report_fit(params)
#~ # try to plot results
#~ try:
#~ import pylab
#~ pylab.plot(x, data, 'k+')
#~ pylab.plot(x, final, 'r')
#~ pylab.show()
#~ except:
#~ pass
# genetic algorithm + minimization run
elif calculationtype == 'gamin':
from lmfit import minimize, Parameters, Parameter, report_fit
maxcycleminimization = 30 #set minimization period
maxcyclega = 20 #set genetic algorithm period
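    # Hybrid scheme (descriptive note): the population evolves through the genetic operators
    # in ga_interface, and periodically (controlled by maxcyclega, or on the final generation)
    # each member's parameters are refined by a short Powell minimization cut off after
    # maxcycleminimization evaluations; the refined parameters are written back into the member.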
#function to minimize
def fcn2min(params, extrakeys, extrakeyssolv, outfile, nptype, datacompoundnamelist):
global mintotalerror #the most minimal error
global acumcycle #general count of cycles
global datadic #store experimental, calculated and contributions to solvation energies for all compounds
global paramtestdic # params to test
global cyclemin # cycle number of the period of minimization
global mintotalerrormember # minimal error of a member
global minparamtestdic #store params with mintotalerrormemeber
#rebuild paramtestdic
paramtestdic={}
for key, values in params.iteritems():
key = key.replace("zzz","@") #change to original key formats
key = key.replace("xxx",".")
paramtestdic[key]=values.value
rsolvtest = paramtestdic["rsolv@"]
if nptype=="claverie" or nptype=="claverietype":
yrel = 4*PI*NS*rsolvtest*rsolvtest*rsolvtest/3.0 # ratio to use in claverie cavitation term
else:
yrel = 0
totalerror, mae, rmse, bias, r2, slope, intercept, datadic = calc_error("0000", paramtestdic, datadic, extrakeys, extrakeyssolv + " RSOLV=% .3f" % (rsolvtest), outfile, nptype, yrel)
if totalerror < mintotalerror:
mintotalerror = totalerror
# write summary file with the low totalerror step
print_summary(mintotalerror, mae, rmse, bias, r2, slope, intercept, acumcycle,datadic,nptype)
# write parameter values
paramout = open("list_param.out","w")
for key, value in sorted(paramtestdic.iteritems()):
paramout.write("%-3s \t %.5f\n" % (key,value))
paramout.close()
if totalerror < mintotalerrormember:
minparamtestdic = deepcopy(paramtestdic)
shutil.rmtree("0000") # erase current directory
print "PASO %i: %f"%(cyclemin,totalerror)
if cyclemin >= maxcycleminimization:
return
cyclemin += 1
return totalerror
memberlist = [] #list that store members (list of paramtestdic)
#building parents
for n in range(0,numbermembers):
#check restrictions, if not assign parameters again
tag1sttry = True # the first time into while loop
while (check_restrictions(paramtestdic,fixlimitdic)==0) or (tag1sttry == True):
tag1sttry = False # first time into while loop
# generate new values of parameters with a distribution probability from current values
paramtestdic = modified_values(currentstep[1], freeparamlist, rangesdic)
            # multiply initial radii by a constant k, which is being optimized
try:
if paramtestdic["k@"]:
for key, value in paramtestdic.iteritems():
if "rc@" in key:
paramtestdic[key] = param0dic[key] * paramtestdic["k@"]
except:
pass
#fix values
try:
for key in fixlist:
paramtestdic[key]=param0dic[key]
except:
pass
memberlist.append(ga_interface.memberobj(paramtestdic))
acumcycle = 0 # accumulate the number of cycles
cyclega = 0 # accumulate the number of cycles of GA period
#generations
for ncycle in range(1,maxgen):
#number of step, to generate name of files
numberstep = next(step)
#fitness calculation
fitnesslist = [] #store fitness
for member in memberlist:
paramtestdic = deepcopy(member.paramdic)
minparamtestdic = deepcopy(member.paramdic)
rsolvtest = paramtestdic["rsolv@"] #take rsolv parameter
if nptype=="claverie" or nptype=="claverietype":
yrel = 4*PI*NS*rsolvtest*rsolvtest*rsolvtest/3.0 # ratio to use in claverie cavitation term
else:
yrel = 0
if (cyclega == maxcyclega) or (acumcycle == maxgen - 1) :
# do minimization
print "minimization..."
cyclemin = 0
params = Parameters()
for ikey, ivalue in paramtestdic.iteritems():
# add to fix params 20150105
if ikey in fixlist:
maxlimit = ivalue + 0.00000001
minlimit = ivalue - 0.00000001
else:
maxlimit = maxlimitdic[ikey]
minlimit = minlimitdic[ikey]
ikey = ikey.replace("@","zzz") #replace @ with zzz because this character is not supported by lmfit library
ikey = ikey.replace(".","xxx") #replace . with xxx because this character is not supported by lmfit library
#~ params.add(ikey, value=ivalue)
#~ if "rczzz" in ikey:
#~ params.add(ikey, value=ivalue, min=minlimit, max=maxlimit)
#~ else:
#~ params.add(ikey, ivalue, False)
params.add(ikey, value=ivalue, min=minlimit, max=maxlimit)
#experimental data
datacompoundnamelist = []
for ikey, ivalue in datadic.iteritems():
datacompoundnamelist.append(ikey) #to convert in error function to a dictionary
try:
mintotalerrormember = 1 / (member.fitness * member.fitness * member.fitness * member.fitness)
except:
mintotalerrormember = 1000.0
try:
result = minimize(fcn2min, params, args=(extrakeys, extrakeyssolv, outfile, nptype, datacompoundnamelist), method='powell')
except:
pass
member.paramdic = deepcopy(minparamtestdic)
member.fitness = None
if member.fitness == None:
totalerror, mae, rmse, bias, r2, slope, intercept, datadic = calc_error(numberstep, paramtestdic, datadic, extrakeys, extrakeyssolv + " RSOLV=% .3f" % (rsolvtest), outfile, nptype, yrel)
if totalerror < mintotalerror:
mintotalerror = totalerror
# write summary file with the low totalerror step
print_summary(mintotalerror, mae, rmse, bias, r2, slope, intercept, ncycle,datadic,nptype)
# write parameter values
paramout = open("list_param.out","w")
for key, value in sorted(paramtestdic.iteritems()):
paramout.write("%-3s \t %.3f\n" % (key,value))
paramout.close()
shutil.rmtree(numberstep) # erase current directory
fitness = 1.0/(totalerror*totalerror*totalerror*totalerror)
fitnesslist.append(fitness)
member.set_fitness(fitness)
else:
fitnesslist.append(member.fitness)
#add to fix write all individues in report file
prevline = "%-5s Err: %3.4f MAE: %3.4f RMSE: %3.4f BIAS: %3.4f R2: %1.5f " % (numberstep, totalerror, mae, rmse, bias, r2)
outfile.write(prevline + print_param(paramtestdic))
if cyclega >= maxcyclega:
cyclega = 0
outfile.write("\n")
genetic = ga_interface.SetsGeneration(memberlist, minlimitdic, maxlimitdic)
memberlist = genetic.next()
outfile.write("\n") #end of line with a mark
cyclega = cyclega + 1
acumcycle += 1
print "PASO: %i %.3f"%(ncycle,np.mean(fitnesslist))
salida = ""
for fitn in sorted(fitnesslist):
salida += " %.3f"%(fitn)
print salida
# static to manual test
elif calculationtype == 'statics':
print "MAE: %2.3f RMSE: %2.3f BIAS: %2.3f R2: %1.5f Slope: %2.3e Intercept: %2.3e" % (mae, rmse, bias, r2, slope, intercept)
print "symbol rc@ g@ MAE RMSE BIAS slope intercept R2 eslope eintercept eR2"
for elementsymbol in ["C", "O", "N", "H", "F", "Cl", "Br", "P", "S"]:
try:
mae, rmse, bias, r2, slope, intercept, errorr2, errorslope, errorintercept = calc_staticsbyelement("0000", datadic, elementsymbol)
radio = "rc@" + elementsymbol + ".a"
gamma = "g@" + elementsymbol + ".a"
print "<%s> %1.3f %1.3f %3.3f %3.3f %3.3f %3.3f %3.3f %1.4f %3.3f %3.3f %1.4f" % (elementsymbol, param0dic[radio], param0dic[gamma], mae, rmse, bias, slope, intercept, r2, errorslope, errorintercept, errorr2)
#~ print "<%s> %1.3f %1.3f MAE: %3.3f RMSE: %3.3f BIAS: %3.3f CALCvsEXP: %3.3f x + %3.3f, R2: %1.4f ERRORvsELECTRON: %3.3f x + %3.3f R2 %1.4f" % (elementsymbol, param0dic[radio], param0dic[gamma], mae, rmse, bias, slope, intercept, r2, errorslope, errorintercept, errorr2)
except:
print "<%s> sin compuestos" %(elementsymbol)
elif calculationtype == 'minstatics':
def make_modification(value0,rangevalue,stepsize):
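        """Return a list of [value, None] pairs sweeping from value0 - rangevalue
        up to (but not including) value0 + rangevalue in steps of stepsize."""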
currentvalue = value0 - rangevalue
valuelist = []
while currentvalue < value0 + rangevalue:
valuelist.append([currentvalue,None])
currentvalue += stepsize
return valuelist
from lmfit import minimize, Parameters, Parameter, report_fit
maxcycleminimization = 30 #set minimization period
#function to minimize
def fcn2min(params, extrakeys, extrakeyssolv, outfile, nptype, datacompoundnamelist):
global mintotalerror #the most minimal error
global ncycle #general count of cycles
global datadic #store experimental, calculated and contributions to solvation energies for all compounds
global paramtestdic # params to test
global cyclemin # cycle number of the period of minimization
#rebuild paramtestdic
paramtestdic={}
for key, values in params.iteritems():
key = key.replace("zzz","@") #change to original key formats
key = key.replace("xxx",".")
paramtestdic[key]=values.value
rsolvtest = paramtestdic["rsolv@"]
if nptype=="claverie" or nptype=="claverietype":
yrel = 4*PI*NS*rsolvtest*rsolvtest*rsolvtest/3.0 # ratio to use in claverie cavitation term
else:
yrel = 0
totalerror, mae, rmse, bias, r2, slope, intercept, datadic = calc_error("0000", paramtestdic, datadic, extrakeys, extrakeyssolv + " RSOLV=% .3f" % (rsolvtest), outfile, nptype, yrel)
if totalerror < mintotalerror:
mintotalerror = totalerror
# write summary file with the low totalerror step
print_summary(mintotalerror, mae, rmse, bias, r2, slope, intercept, ncycle,datadic,nptype)
# write parameter values
paramout = open("list_param.out","w")
for key, value in sorted(paramtestdic.iteritems()):
paramout.write("%-3s \t %.5f\n" % (key,value))
paramout.close()
print "PASO %i: %f"%(cyclemin,totalerror)
if cyclemin >= maxcycleminimization:
cyclemin = 0
return
cyclemin += 1
return totalerror
def run_minstatics(paramtestdic):
global datadic
staticlist = []
print "\n symbol rc@ g@ MAE RMSE BIAS slope intercept R2 eslope eintercept eR2"
for elementsymbol in ["C", "O", "N", "H", "F", "Cl", "Br", "P", "S"]:
#~ try:
mae, rmse, bias, r2, slope, intercept, errorr2, errorslope, errorintercept = calc_staticsbyelement("0000", datadic, elementsymbol)
radio = "rc@" + elementsymbol + ".a"
gamma = "g@" + elementsymbol + ".a"
print "<%s> %1.4f %1.4f %3.3f %3.3f %3.3f %3.3f %3.3f %1.4f %3.3f %3.3f %1.4f" % (elementsymbol, paramtestdic[radio], paramtestdic[gamma], mae, rmse, bias, slope, intercept, r2, errorslope, errorintercept, errorr2)
staticlist.append([mae,elementsymbol, paramtestdic[radio], paramtestdic[gamma],rmse, bias, slope, intercept, r2, errorslope, errorintercept, errorr2])
#~ except:
#~ print "<%s> sin compuestos" %(elementsymbol)
newstaticlist = sorted(staticlist, key=lambda lista: lista[0], reverse=True)
return newstaticlist
# select worst element (with the highest individual MAE contribution)
staticlist = run_minstatics(paramtestdic)
print staticlist
elementsymbol = staticlist[0][1]
minmaebyelement = staticlist[0][0]
initialradiistepsize = 0.08
initialgammastepsize = 0.02
radiistepsize = initialradiistepsize
gammastepsize = initialgammastepsize
initialradiirange = radiirange
initialgammarange = gammarange
for ncycle in range(0,maxiter):
radioselect = "rc@" + elementsymbol + ".a"
gammaselect = "g@" + elementsymbol + ".a"
radiilist = make_modification(paramtestdic[radioselect],radiirange,radiistepsize)
gammalist = make_modification(paramtestdic[gammaselect],gammarange,gammastepsize)
print "SELECTED %s : number test %i, radii step %f , gamma step %f" % (elementsymbol, len(radiilist)*len(gammalist), radiistepsize, gammastepsize)
for radio in radiilist:
for gamma in gammalist:
paramtestdic[radioselect] = radio[0]
paramtestdic[gammaselect] = gamma[0]
rsolvtest = paramtestdic["rsolv@"] #take rsolv parameter
if nptype=="claverie" or nptype=="claverietype":
yrel = 4*PI*NS*rsolvtest*rsolvtest*rsolvtest/3.0 # ratio to use in claverie cavitation term
else:
yrel = 0
totalerror, mae, rmse, bias, r2, slope, intercept, datadic = calc_error("0000", paramtestdic, datadic, extrakeys, extrakeyssolv + " RSOLV=% .3f" % (rsolvtest), outfile, nptype, yrel)
staticlist = run_minstatics(paramtestdic)
if totalerror < mintotalerror:
mintotalerror = totalerror
# write summary file with the low totalerror step
print_summary(mintotalerror, mae, rmse, bias, r2, slope, intercept, ncycle,datadic,nptype)
# write parameter values
paramout = open("list_param.out","w")
for key, value in sorted(paramtestdic.iteritems()):
paramout.write("%-3s \t %.4f\n" % (key,value))
paramout.close()
for staticbyelement in staticlist:
if elementsymbol in staticbyelement[1]:
maebyelement = staticbyelement[0]
if maebyelement < minmaebyelement:
minmaebyelement = maebyelement
minradio = radio[0]
mingamma = gamma[0]
nextelementsymbol = staticlist[0][1]
nextmae = staticlist[0][0]
minstaticlist = staticlist
paramtestdic[radioselect] = minradio
paramtestdic[gammaselect] = mingamma
#~ if minmaebyelement < nextelementsymbol:
if nextelementsymbol == elementsymbol:
#minimize
params = Parameters()
for ikey, ivalue in paramtestdic.iteritems():
maxlimit = maxlimitdic[ikey]
minlimit = minlimitdic[ikey]
ikey = ikey.replace("@","zzz") #replace @ with zzz because this character is not supported by lmfit library
ikey = ikey.replace(".","xxx") #replace . with xxx because this character is not supported by lmfit library
#~ params.add(ikey, value=ivalue)
#~ if "rczzz" in ikey:
#~ params.add(ikey, value=ivalue, min=minlimit, max=maxlimit)
#~ else:
#~ params.add(ikey, ivalue, False)
params.add(ikey, value=ivalue, min=minlimit, max=maxlimit)
print params
#experimental data
datacompoundnamelist = []
for ikey, ivalue in datadic.iteritems():
datacompoundnamelist.append(ikey) #to convert in error function to a dictionary
cyclemin = 0
try:
result = minimize(fcn2min, params, args=(extrakeys, extrakeyssolv, outfile, nptype, datacompoundnamelist), method='powell')
except:
pass
staticlist = run_minstatics(paramtestdic)
for staticbyelement in staticlist:
if elementsymbol in staticbyelement[1]:
maebyelement = staticbyelement[0]
if maebyelement < minmaebyelement:
minmaebyelement = maebyelement
minradio = radio[0]
mingamma = gamma[0]
nextelementsymbol = staticlist[0][1]
nextmae = staticlist[0][0]
minstaticlist = staticlist
#~ gammastepsize = gammastepsize * 0.8
#~ radiistepsize = radiistepsize * 0.8
#~ radiirange = radiirange * 0.8
#~ gammarange = gammarange * 0.8
else:
radiistepsize = initialradiistepsize
gammastepsize = initialgammastepsize
#~ radiirange = initialradiirange
#~ gammarange = initialgammarange
elementsymbol = nextelementsymbol
minmaebyelement = nextmae
elif calculationtype == 'minstatics_slope':
def make_modification(value0,rangevalue,stepsize):
currentvalue = value0 - rangevalue
valuelist = []
while currentvalue < value0 + rangevalue:
valuelist.append([currentvalue,None])
currentvalue += stepsize
return valuelist
from lmfit import minimize, Parameters, Parameter, report_fit
maxcycleminimization = 30 #set minimization period
#function to minimize
def fcn2min(params, extrakeys, extrakeyssolv, outfile, nptype, datacompoundnamelist):
global mintotalerror #the most minimal error
global ncycle #general count of cycles
global datadic #store experimental, calculated and contributions to solvation energies for all compounds
global paramtestdic # params to test
global cyclemin # cycle number of the period of minimization
#rebuild paramtestdic
paramtestdic={}
for key, values in params.iteritems():
key = key.replace("zzz","@") #change to original key formats
key = key.replace("xxx",".")
paramtestdic[key]=values.value
rsolvtest = paramtestdic["rsolv@"]
if nptype=="claverie" or nptype=="claverietype":
yrel = 4*PI*NS*rsolvtest*rsolvtest*rsolvtest/3.0 # ratio to use in claverie cavitation term
else:
yrel = 0
totalerror, mae, rmse, bias, r2, slope, intercept, datadic = calc_error("0000", paramtestdic, datadic, extrakeys, extrakeyssolv + " RSOLV=% .3f" % (rsolvtest), outfile, nptype, yrel)
if totalerror < mintotalerror:
mintotalerror = totalerror
# write summary file with the low totalerror step
print_summary(mintotalerror, mae, rmse, bias, r2, slope, intercept, ncycle,datadic,nptype)
# write parameter values
paramout = open("list_param.out","w")
            for key, value in sorted(paramtestdic.iteritems()):
|
# bundle2.py - generic container format to transmit arbitrary data.
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""Handling of the new bundle2 format
The goal of bundle2 is to act as an atomic packet to transmit a set of
payloads in an application agnostic way. It consists of a sequence of "parts"
that will be handed to and processed by the application layer.
General format architecture
===========================
The format is architected as follows
- magic string
- stream level parameters
- payload parts (any number)
- end of stream marker.
The binary format
============================
All numbers are unsigned and big-endian.
stream level parameters
------------------------
The binary format is as follows
:params size: int32
The total number of Bytes used by the parameters
:params value: arbitrary number of Bytes
A blob of `params size` containing the serialized version of all stream level
parameters.
The blob contains a space separated list of parameters. Parameters with value
are stored in the form `<name>=<value>`. Both name and value are urlquoted.
  Empty names are forbidden.
  Names MUST start with a letter. If this first letter is lower case, the
  parameter is advisory and can be safely ignored. However, when the first
  letter is capital, the parameter is mandatory and the bundling process MUST
  stop if it is not able to process it.
Stream parameters use a simple textual format for two main reasons:
- Stream level parameters should remain simple and we want to discourage any
crazy usage.
- Textual data allow easy human inspection of a bundle2 header in case of
troubles.
Any Applicative level options MUST go into a bundle2 part instead.
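As an illustration (the parameter name below is hypothetical), a stream
carrying the single advisory parameter ``foo=bar`` would encode its parameter
block as::
    \x00\x00\x00\x07    params size (7 bytes of parameter data follow)
    foo=bar             urlquoted parameter blob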
Payload part
------------------------
The binary format is as follows
:header size: int32
The total number of Bytes used by the part headers. When the header is empty
(size = 0) this is interpreted as the end of stream marker.
:header:
    The header defines how to interpret the part. It contains two pieces of
    data: the part type, and the part parameters.
    The part type is used to route the part to an application level handler
    that can interpret the payload.
Part parameters are passed to the application level handler. They are
meant to convey information that will help the application level object to
interpret the part payload.
    The binary format of the header is as follows
:typesize: (one byte)
:parttype: alphanumerical part name (restricted to [a-zA-Z0-9_:-]*)
:partid: A 32bits integer (unique in the bundle) that can be used to refer
to this part.
:parameters:
        Part's parameters may have arbitrary content, the binary structure is::
<mandatory-count><advisory-count><param-sizes><param-data>
:mandatory-count: 1 byte, number of mandatory parameters
:advisory-count: 1 byte, number of advisory parameters
:param-sizes:
            N couples of bytes, where N is the total number of parameters. Each
            couple contains (<size-of-key>, <size-of-value>) for one parameter.
:param-data:
A blob of bytes from which each parameter key and value can be
retrieved using the list of size couples stored in the previous
field.
            Mandatory parameters come first, then the advisory ones.
Each parameter's key MUST be unique within the part.
:payload:
payload is a series of `<chunksize><chunkdata>`.
    `chunksize` is an int32, `chunkdata` are plain bytes (as much as
    `chunksize` says). The payload part is concluded by a zero size chunk.
The current implementation always produces either zero or one chunk.
This is an implementation limitation that will ultimately be lifted.
`chunksize` can be negative to trigger special case processing. No such
processing is in place yet.
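As an illustration (the concrete values are only an example), the header of an
advisory part of type ``output`` carrying a single advisory parameter
``in-reply-to=1`` would be serialized as::
    \x06                typesize (6)
    output              parttype (lower case, hence advisory)
    \x00\x00\x00\x00    partid (0)
    \x00\x01            mandatory-count (0), advisory-count (1)
    \x0b\x01            sizes of the key (11) and of the value (1)
    in-reply-to1        param data blob (key then value)
This header blob is preceded by its int32 size and followed by the payload
chunks described above.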
Bundle processing
============================
Each part is processed in order using a "part handler". Handlers are
registered for a certain part type.
The matching of a part to its handler is case insensitive. The case of the
part type is used to know if a part is mandatory or advisory. If the Part type
contains any uppercase char it is considered mandatory. When no handler is
known for a Mandatory part, the process is aborted and an exception is raised.
If the part is advisory and no handler is known, the part is ignored. When the
process is aborted, the full bundle is still read from the stream to keep the
channel usable. But none of the parts read after an abort are processed. In
the future, dropping the stream may become an option for channels we do not
care to preserve.
"""
import errno
import sys
import util
import struct
import urllib
import string
import obsolete
import pushkey
import url
import re
import changegroup, error
from i18n import _
_pack = struct.pack
_unpack = struct.unpack
_fstreamparamsize = '>i'
_fpartheadersize = '>i'
_fparttypesize = '>B'
_fpartid = '>I'
_fpayloadsize = '>i'
_fpartparamcount = '>BB'
preferedchunksize = 4096
_parttypeforbidden = re.compile('[^a-zA-Z0-9_:-]')
def validateparttype(parttype):
"""raise ValueError if a parttype contains invalid character"""
if _parttypeforbidden.search(parttype):
raise ValueError(parttype)
def _makefpartparamsizes(nbparams):
"""return a struct format to read part parameter sizes
    The number of parameters is variable so we need to build that format
dynamically.
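    For instance (illustrative)::
        >>> _makefpartparamsizes(2)
        '>BBBB'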
"""
return '>'+('BB'*nbparams)
parthandlermapping = {}
def parthandler(parttype, params=()):
"""decorator that register a function as a bundle2 part handler
eg::
@parthandler('myparttype', ('mandatory', 'param', 'handled'))
def myparttypehandler(...):
'''process a part of type "my part".'''
...
"""
validateparttype(parttype)
def _decorator(func):
lparttype = parttype.lower() # enforce lower case matching.
assert lparttype not in parthandlermapping
parthandlermapping[lparttype] = func
func.params = frozenset(params)
return func
return _decorator
class unbundlerecords(object):
"""keep record of what happens during and unbundle
New records are added using `records.add('cat', obj)`. Where 'cat' is a
category of record and obj is an arbitrary object.
`records['cat']` will return all entries of this category 'cat'.
Iterating on the object itself will yield `('category', obj)` tuples
for all entries.
All iterations happens in chronological order.
"""
def __init__(self):
self._categories = {}
self._sequences = []
self._replies = {}
def add(self, category, entry, inreplyto=None):
"""add a new record of a given category.
The entry can then be retrieved in the list returned by
self['category']."""
self._categories.setdefault(category, []).append(entry)
self._sequences.append((category, entry))
if inreplyto is not None:
self.getreplies(inreplyto).add(category, entry)
def getreplies(self, partid):
"""get the records that are replies to a specific part"""
return self._replies.setdefault(partid, unbundlerecords())
def __getitem__(self, cat):
return tuple(self._categories.get(cat, ()))
def __iter__(self):
return iter(self._sequences)
def __len__(self):
return len(self._sequences)
def __nonzero__(self):
return bool(self._sequences)
class bundleoperation(object):
"""an object that represents a single bundling process
Its purpose is to carry unbundle-related objects and states.
A new object should be created at the beginning of each bundle processing.
The object is to be returned by the processing function.
    The object has very little content now; it will ultimately contain:
* an access to the repo the bundle is applied to,
* a ui object,
* a way to retrieve a transaction to add changes to the repo,
* a way to record the result of processing each part,
* a way to construct a bundle response when applicable.
"""
def __init__(self, repo, transactiongetter, captureoutput=True):
self.repo = repo
self.ui = repo.ui
self.records = unbundlerecords()
self.gettransaction = transactiongetter
self.reply = None
self.captureoutput = captureoutput
class TransactionUnavailable(RuntimeError):
pass
def _notransaction():
"""default method to get a transaction while processing a bundle
Raise an exception to highlight the fact that no transaction was expected
to be created"""
raise TransactionUnavailable()
def processbundle(repo, unbundler, transactiongetter=None, op=None):
"""This function process a bundle, apply effect to/from a repo
It iterates over each part then searches for and uses the proper handling
code to process the part. Parts are processed in order.
    This is a very early version of this function that will be strongly
    reworked before final usage.
    An unknown mandatory part will abort the process.
It is temporarily possible to provide a prebuilt bundleoperation to the
function. This is used to ensure output is properly propagated in case of
an error during the unbundling. This output capturing part will likely be
reworked and this ability will probably go away in the process.
"""
if op is None:
if transactiongetter is None:
transactiongetter = _notransaction
op = bundleoperation(repo, transactiongetter)
# todo:
    # - replace this with an init function soon.
# - exception catching
unbundler.params
iterparts = unbundler.iterparts()
part = None
try:
for part in iterparts:
_processpart(op, part)
except Exception, exc:
for part in iterparts:
# consume the bundle content
part.seek(0, 2)
# Small hack to let caller code distinguish exceptions from bundle2
# processing from processing the old format. This is mostly
# needed to handle different return codes to unbundle according to the
# type of bundle. We should probably clean up or drop this return code
# craziness in a future version.
exc.duringunbundle2 = True
salvaged = []
if op.reply is not None:
salvaged = op.reply.salvageoutput()
exc._bundle2salvagedoutput = salvaged
raise
return op
def _processpart(op, part):
"""process a single part from a bundle
The part is guaranteed to have been fully consumed when the function exits
(even if an exception is raised)."""
try:
try:
handler = parthandlermapping.get(part.type)
if handler is None:
raise error.UnsupportedPartError(parttype=part.type)
op.ui.debug('found a handler for part %r\n' % part.type)
unknownparams = part.mandatorykeys - handler.params
if unknownparams:
unknownparams = list(unknownparams)
unknownparams.sort()
raise error.UnsupportedPartError(parttype=part.type,
params=unknownparams)
except error.UnsupportedPartError, exc:
if part.mandatory: # mandatory parts
raise
op.ui.debug('ignoring unsupported advisory part %s\n' % exc)
return # skip to part processing
# handler is called outside the above try block so that we don't
# risk catching KeyErrors from anything other than the
# parthandlermapping lookup (any KeyError raised by handler()
# itself represents a defect of a different variety).
output = None
if op.captureoutput and op.reply is not None:
op.ui.pushbuffer(error=True, subproc=True)
output = ''
try:
handler(op, part)
finally:
if output is not None:
output = op.ui.popbuffer()
if output:
outpart = op.reply.newpart('output', data=output,
mandatory=False)
outpart.addparam('in-reply-to', str(part.id), mandatory=False)
finally:
# consume the part content to not corrupt the stream.
part.seek(0, 2)
def decodecaps(blob):
"""decode a bundle2 caps bytes blob into a dictionary
The blob is a list of capabilities (one per line)
Capabilities may have values using a line of the form::
capability=value1,value2,value3
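    For instance (illustrative), the line ``capability=value1,value2,value3``
    decodes to ``{'capability': ['value1', 'value2', 'value3']}``.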
The values are always a list."""
caps = {}
for line in blob.splitlines():
if not line:
continue
if '=' not in line:
key, vals = line, ()
else:
key, vals = line.split('=', 1)
vals = vals.split(',')
key = urllib.unquote(key)
vals = [urllib.unquote(v) for v in vals]
caps[key] = vals
return caps
def encodecaps(caps):
"""encode a bundle2 caps dictionary into a bytes blob"""
chunks = []
for ca in sorted(caps):
vals = caps[ca]
ca = urllib.quote(ca)
vals = [urllib.quote(v) for v in vals]
if vals:
ca = "%s=%s" % (ca, ','.join(vals))
chunks.append(ca)
return '\n'.join(chunks)
class bundle20(object):
"""represent an outgoing bundle2 container
    Use the `addparam` method to add stream level parameters, and `newpart` to
populate it. Then call `getchunks` to retrieve all the binary chunks of
data that compose the bundle2 container."""
_magicstring = 'HG20'
def __init__(self, ui, capabilities=()):
self.ui = ui
self._params = []
self._parts = []
self.capabilities = dict(capabilities)
@property
def nbparts(self):
"""total number of parts added to the bundler"""
return len(self._parts)
    # methods used to define the bundle2 content
def addparam(self, name, value=None):
"""add a stream level parameter"""
if not name:
raise ValueError('empty parameter name')
if name[0] not in string.letters:
raise ValueError('non letter first character: %r' % name)
self._params.append((name, value))
def addpart(self, part):
"""add a new part to the bundle2 container
Parts contains the actual applicative payload."""
assert part.id is None
part.id = len(self._parts) # very cheap counter
self._parts.append(part)
def newpart(self, typeid, *args, **kwargs):
"""create a new part and add it to the containers
        Since the part is directly added to the container, for now any failure
        to properly initialize the part after calling ``newpart`` should
        result in a failure of the whole bundling process.
You can still fall back to manually create and add if you need better
control."""
part = bundlepart(typeid, *args, **kwargs)
self.addpart(part)
return part
# methods used to generate the bundle2 stream
def getchunks(self):
self.ui.debug('start emission of %s stream\n' % self._magicstring)
yield self._magicstring
param = self._paramchunk()
self.ui.debug('bundle parameter: %s\n' % param)
yield _pack(_fstreamparamsize, len(param))
if param:
yield param
self.ui.debug('start of parts\n')
for part in self._parts:
self.ui.debug('bundle part: "%s"\n' % part.type)
for chunk in part.getchunks():
yield chunk
self.ui.debug('end of bundle\n')
yield _pack(_fpartheadersize, 0)
def _paramchunk(self):
"""return a encoded version of all stream parameters"""
blocks = []
for par, value in self._params:
par = urllib.quote(par)
if value is not None:
value = urllib.quote(value)
par = '%s=%s' % (par, value)
blocks.append(par)
return ' '.join(blocks)
def salvageoutput(self):
"""return a list with a copy of all output parts in the bundle
This is meant to be used during error handling to make sure we preserve
server output"""
salvaged = []
for part in self._parts:
if part.type.startswith('output'):
salvaged.append(part.copy())
return salvaged
class unpackermixin(object):
"""A mixin to extract bytes and struct data from a stream"""
def __init__(self, fp):
self._fp = fp
self._seekable = (util.safehasattr(fp, 'seek') and
util.safehasattr(fp, 'tell'))
def _unpack(self, format):
"""unpack this struct format from the stream"""
data = self._readexact(struct.calcsize(format))
return _unpack(format, data)
def _readexact(self, size):
"""read exactly <size> bytes from the stream"""
return changegroup.readexactly(self._fp, size)
def seek(self, offset, whence=0):
"""move the underlying file pointer"""
if self._seekable:
return self._fp.seek(offset, whence)
else:
raise NotImplementedError(_('File pointer is not seekable'))
def tell(self):
"""return the file offset, or None if file is not seekable"""
if self._seekable:
try:
return self._fp.tell()
except IOError, e:
if e.errno == errno.ESPIPE:
self._seekable = False
else:
raise
return None
def close(self):
"""close underlying file"""
if util.safehasattr(self._fp, 'close'):
return self._fp.close()
def getunbundler(ui, fp, header=None):
"""return a valid unbundler object for a given header"""
if header is None:
header = changegroup.readexactly(fp, 4)
magic, version = header[0:2], header[2:4]
if magic != 'HG':
raise util.Abort(_('not a Mercurial bundle'))
unbundlerclass = formatmap.get(version)
if unbundlerclass is None:
raise util.Abort(_('unknown bundle version %s') % version)
unbundler = unbundlerclass(ui, fp)
ui.debug('start processing of %s stream\n' % header)
return unbundler
class unbundle20(unpackermixin):
"""interpret a bundle2 stream
This class is fed with a binary stream and yields parts through its
`iterparts` methods."""
def __init__(self, ui, fp):
"""If header is specified, we do not read it out of the stream."""
self.ui = ui
super(unbundle20, self).__init__(fp)
@util.propertycache
def params(self):
"""dictionary of stream level parameters"""
self.ui.debug('reading bundle2 stream parameters\n')
params = {}
paramssize = self._unpack(_fstreamparamsize)[0]
if paramssize < 0:
raise error.BundleValueError('negative bundle param size: %i'
% paramssize)
if paramssize:
for p in self._readexact(paramssize).split(' '):
p = p.split('=', 1)
p = [urllib.unquote(i) for i in p]
if len(p) < 2:
p.append(None)
self._processparam(*p)
params[p[0]] = p[1]
return params
def _processparam(self, name, value):
"""process a parameter, applying its effect if needed
        Parameters starting with a lower case letter are advisory and will be
        ignored when unknown. Those starting with an upper case letter are
        mandatory and this function will raise a KeyError when unknown.
        Note: no options are currently supported. Any input will either be
        ignored or fail.
"""
if not name:
raise ValueError('empty parameter name')
if name[0] not in string.letters:
raise ValueError('non letter first character: %r' % name)
# Some logic will be later added here to try to process the option for
# a dict of known parameter.
if name[0].islower():
self.ui.debug("ignoring unknown parameter %r\n" % name)
else:
raise error.UnsupportedPartError(params=(name,))
def iterparts(self):
"""yield all parts contained in the stream"""
# make sure param have been loaded
self.params
self.ui.debug('start extraction of bundle2 parts\n')
headerblock = self._readpartheader()
while headerblock is not None:
part = unbundlepart(self.ui, headerblock, self._fp)
yield part
part.seek(0, 2)
headerblock = self._readpartheader()
self.ui.debug('end of bundle2 stream\n')
def _readpartheader(self):
"""reads a part header size and return the bytes blob
returns None if empty"""
headersize = self._unpack(_fpartheadersize)[0]
if headersize < 0:
raise error.BundleValueError('negative part header size: %i'
% headersize)
self.ui.debug('part header size: %i\n' % headersize)
if headersize:
return self._readexact(headersize)
return None
def compressed(self):
return False
formatmap = {'20': unbundle20}
class bundlepart(object):
"""A bundle2 part contains application level payload
The part `type` is used to route the part to the application level
handler.
The part payload is contained in ``part.data``. It could be raw bytes or a
generator of byte chunks.
You can add parameters to the part using the ``addparam`` method.
Parameters can be either mandatory (default) or advisory. Remote side
should be able to safely ignore the advisory ones.
Both data and parameters cannot be modified after the generation has begun.
"""
def __init__(self, parttype, mandatoryparams=(), advisoryparams=(),
data='', mandatory=True):
validateparttype(parttype)
self.id = None
self.type = parttype
self._data = data
self._mandatoryparams = list(mandatoryparams)
self._advisoryparams = list(advisoryparams)
# checking for duplicated entries
self._seenparams = set()
for pname, __ in self._mandatoryparams + self._advisoryparams:
if pname in self._seenparams:
raise RuntimeError('duplicated params: %s' % pname)
self._seenparams.add(pname)
# status of the part's generation:
# - None: not started,
# - False: currently generated,
# - True: generation done.
self._generated = None
self.mandatory = mandatory
def copy(self):
"""return a copy of the part
        The new part has the very same content but no partid assigned yet.
Parts with generated data cannot be copied."""
assert not util.safehasattr(self.data, 'next')
return self.__class__(self.type, self._mandatoryparams,
self._advisoryparams, self._data, self.mandatory)
    # methods used to define the part content
def __setdata(self, data):
if self._generated is not None:
raise error.ReadOnlyPartError('part is being generated')
self._data = data
def __getdata(self):
return self._data
data = property(__getdata, __setdata)
@property
def mandatoryparams(self):
# make it an immutable tuple to force people through ``addparam``
return tuple(self._mandatoryparams)
@property
def advisoryparams(self):
# make it an immutable tuple to force people through ``addparam``
return tuple(self._advisoryparams)
def addparam(self, name, value='', mandatory=True):
if self._generated is not None:
raise error.ReadOnlyPartError('part is being generated')
if name in self._seenparams:
raise ValueError('duplicated params: %s' % name)
self._seenparams.add(name)
params = self._advisoryparams
if mandatory:
params = self._mandatoryparams
params.append((name, value))
    # methods used to generate the bundle2 stream
def getchunks(self):
if self._generated is not None:
raise RuntimeError('part can only be consumed once')
self._generated = False
#### header
if self.mandatory:
parttype = self.type.upper()
else:
parttype = self.type.lower()
## parttype
header = [_pack(_fparttypesize, len(parttype)),
parttype, _pack(_fpartid, self.id),
]
## parameters
# count
manpar = self.mandatoryparams
advpar = self.advisoryparams
header.append(_pack(_fpartparamcount, len(manpar), len(advpar)))
# size
parsizes = []
for key, value in manpar:
parsizes.append(len(key))
parsizes.append(len(value))
for key, value in advpar:
parsizes.append(len(key))
parsizes.append(len(value))
paramsizes = _pack(_makefpartparamsizes(len(parsizes) / 2), *parsizes)
header.append(paramsizes)
# key, value
for key, value in manpar:
header.append(key)
header.append(value)
for key, value in advpar:
header.append(key)
header.append(value)
## finalize header
headerchunk = ''.join(header)
yield _pack(_fpartheadersize, len(headerchunk))
yield headerchunk
## payload
try:
for chunk in self._payloadchunks():
yield _pack(_fpayloadsize, len(chunk))
yield chunk
except Exception, exc:
# backup exception data for later
exc_info = sys.exc_info()
msg = 'unexpected error: %s' % exc
interpart = bundlepart('error:abort', [('message', msg)],
mandatory=False)
interpart.id = 0
yield _pack(_fpayloadsize, -1)
for chunk in interpart.getchunks():
yield chunk
# abort current part payload
yield _pack(_fpayloadsize, 0)
raise exc_info[0], exc_info[1], exc_info[2]
# end of payload
yield _pack(_fpayloadsize, 0)
self._generated = True
def _payloadchunks(self):
"""yield chunks of a the part payload
Exists to handle the different methods to provide data to a part."""
# we only support fixed size data now.
# This will be improved in the future.
if util.safehasattr(self.data, 'next'):
buff = util.chunkbuffer(self.data)
chunk = buff.read(preferedchunksize)
while chunk:
yield chunk
chunk = buff.read(preferedchunksize)
elif len(self.data):
yield self.data
flaginterrupt = -1
class interrupthandler(unpackermixin):
"""read one part and process it with restricted capability
    This allows transmitting exceptions raised on the producer side during
    part iteration while the consumer is reading a part.
    Parts processed in this manner only have access to a ui object."""
def __init__(self, ui, fp):
super(interrupthandler, self).__init__(fp)
self.ui = ui
def _readpartheader(self):
"""reads a part header size and return the bytes blob
returns None if empty"""
headersize = self._unpack(_fpartheadersize)[0]
if headersize < 0:
raise error.BundleValueError('negative part header size: %i'
% headersize)
self.ui.debug('part header size: %i\n' % headersize)
if headersize:
return self._readexact(headersize)
return None
def __call__(self):
self.ui.debug('bundle2 stream interruption, looking for a part.\n')
headerblock = self._readpartheader()
if headerblock is None:
self.ui.debug('no part found during interruption.\n')
return
part = unbundlepart(self.ui, headerblock, self._fp)
op = interruptoperation(self.ui)
_processpart(op, part)
class interruptoperation(object):
"""A limited operation to be use by part handler during interruption
It only have access to an ui object.
"""
def __init__(self, ui):
self.ui = ui
self.reply = None
self.captureoutput = False
@property
def repo(self):
raise RuntimeError('no repo access from stream interruption')
def gettransaction(self):
raise TransactionUnavailable('no repo access from stream interruption')
class unbundlepart(unpackermixin):
"""a bundle part read from a bundle"""
def __init__(self, ui, header, fp):
super(unbundlepart, self).__init__(fp)
self.ui = ui
# unbundle state attr
self._headerdata = header
self._headeroffset = 0
self._initialized = False
self.consumed = False
# part data
self.id = None
self.type = None
self.mandatoryparams = None
self.advisoryparams = None
self.params = None
self.mandatorykeys = ()
self._payloadstream = None
self._readheader()
self._mandatory = None
self._chunkindex = [] #(payload, file) position tuples for chunk starts
self._pos = 0
def _fromheader(self, size):
"""return the next <size> byte from the header"""
offset = self._headeroffset
data = self._headerdata[offset:(offset + size)]
self._headeroffset = offset + size
return data
def _unpackheader(self, format):
"""read given format from header
This automatically compute the size of the format to read."""
data = self._fromheader(struct.calcsize(format))
return _unpack(format, data)
def _initparams(self, mandatoryparams, advisoryparams):
"""internal function to setup all logic related parameters"""
# make it read only to prevent people touching it by mistake.
self.mandatoryparams = tuple(mandatoryparams)
self.advisoryparams = tuple(advisoryparams)
# user friendly UI
self.params = dict(self.mandatoryparams)
self.params.update(dict(self.advisoryparams))
self.mandatorykeys = frozenset(p[0] for p in mandatoryparams)
def _payloadchunks(self, chunknum=0):
'''seek to specified chunk and start yielding data'''
if len(self._chunkindex) == 0:
assert chunknum == 0, 'Must start with chunk 0'
self._chunkindex.append((0, super(unbundlepart, self).tell()))
else:
assert chunknum < len(self._chunkindex), \
'Unknown chunk %d' % chunknum
super(unbundlepart, self).seek(self._chunkindex[chunknum][1])
pos = self._chunkindex[chunknum][0]
payloadsize = self._unpack(_fpayloadsize)[0]
self.ui.debug('payload chunk size: %i\n' % payloadsize)
while payloadsize:
if payloadsize == flaginterrupt:
# interruption detection, the handler will now read a
# single part and process it.
interrupthandler(self.ui, self._fp)()
elif payloadsize < 0:
msg = 'negative payload chunk size: %i' % payloadsize
raise error.BundleValueError(msg)
else:
result = self._readexact(payloadsize)
chunknum += 1
pos += payloadsize
if chunknum == len(self._chunkindex):
self._chunkindex.append((pos,
super(unbundlepart, self).tell()))
yield result
payloadsize = self._unpack(_fpayloadsize)[0]
self.ui.debug('payload chunk size: %i\n' % payloadsize)
def _findchunk(self, pos):
'''for a given payload position, return a chunk number and offset'''
for chunk, (ppos, fpos) in enumerate(self._chunkindex):
if ppos == pos:
return chunk, 0
elif ppos > pos:
return chunk - 1, pos - self._chunkindex[chunk - 1][0]
raise ValueError('Unknown chunk')
def _readheader(self):
"""read the header and setup the object"""
typesize = self._unpackheader(_fparttypesize)[0]
self.type = self._fromheader(typesize)
self.ui.debug('part type: "%s"\n' % self.type)
self.id = self._unpackheader(_fpartid)[0]
self.ui.debug('part id: "%s"\n' % self.id)
# extract mandatory bit from type
self.mandatory = (self.type != self.type.lower())
self.type = self.type.lower()
## reading parameters
# param count
mancount, advcount = self._unpackheader(_fpartparamcount)
self.ui.debug('part parameters: %i\n' % (mancount + advcount))
# param size
fparamsizes = _makefpartparamsizes(mancount + advcount)
paramsizes = self._unpackheader(fparamsizes)
# make it a list of couple again
paramsizes = zip(paramsizes[::2], paramsizes[1::2])
# split mandatory from advisory
mansizes = paramsizes[:mancount]
advsizes = paramsizes[mancount:]
# retrieve param value
manparams = []
for key, value in mansizes:
manparams.append((self._fromheader(key), self._fromheader(value)))
advparams = []
for key, value in advsizes:
advparams.append((self._fromheader(key), self._fromheader(value)))
self._initparams(manparams, advparams)
## part payload
self._payloadstream = util.chunkbuffer(self._payloadchunks())
# we read the data, tell it
self._initialized = True
def read(self, size=None):
"""read payload data"""
if not self._initialized:
self._readheader()
if size is None:
data = self._payloadstream.read()
else:
data = self._payloadstream.read(size)
if size is None or len(data) < size:
self.consumed = True
self._pos += len(data)
return data
def tell(self):
return self._pos
def seek(self, offset, whence=0):
if whence == 0:
newpos = offset
elif whence == 1:
newpos = self._pos + offset
elif whence == 2:
if not self.consumed:
self.read()
newpos = self._chunkindex[-1][0] - offset
else:
raise ValueError('Unknown whence value: %r' % (whence,))
if newpos > self._chunkindex[-1][0] and not self.consumed:
self.read()
if not 0 <= newpos <= self._chunkindex[-1][0]:
raise ValueError('Offset out of range')
if self._pos != newpos:
chunk, internaloffset = self._findchunk(newpos)
self._payloadstream = util.chunkbuffer(self._payloadchunks(chunk))
adjust = self.read(internaloffset)
if len(adjust) != internaloffset:
raise util.Abort(_('Seek failed\n'))
self._pos = newpos
capabilities = {'HG20': (),
'listkeys': (),
'pushkey': (),
'digests': tuple(sorted(util.DIGESTS.keys())),
'remote-changegroup': ('http', 'https'),
}
def getrepocaps(repo, allowpushback=False):
"""return the bundle2 capabilities for a given repo
Exists to allow extensions (like evolution) to mutate the capabilities.
"""
caps = capabilities.copy()
caps['changegroup'] = tuple(sorted(changegroup.packermap.keys()))
if obsolete.isenabled(repo, obsolete.exchangeopt):
supportedformat = tuple('V%i' % v for v in obsolete.formats)
caps['obsmarkers'] = supportedformat
if allowpushback:
caps['pushback'] = ()
return caps
def bundle2caps(remote):
"""return the bundle capabilities of a peer as dict"""
raw = remote.capable('bundle2')
if not raw and raw != '':
return {}
capsblob = urllib.unquote(remote.capable('bundle2'))
return decodecaps(capsblob)
def obsmarkersversion(caps):
"""extract the list of supported obsmarkers versions from a bundle2caps dict
"""
obscaps = caps.get('obsmarkers', ())
return [int(c[1:]) for c in obscaps if c.startswith('V')]
@parthandler('changegroup', ('version',))
def handlechangegroup(op, inpart):
"""apply a changegroup part on the repo
    This is a very early implementation that will be massively reworked before
    being inflicted on any end user.
"""
# Make sure we trigger a transaction creation
#
# The addchangegroup function will get a transaction object by itself, but
# we need to make sure we trigger the creation of a transaction object used
# for the whole processing scope.
op.gettransaction()
unpackerversion = inpart.params.get('version', '01')
# We should raise an appropriate exception here
unpacker = changegroup.packermap[unpackerversion][1]
cg = unpacker(inpart, 'UN')
# the source and url passed here are overwritten by the one contained in
# the transaction.hookargs argument. So 'bundle2' is a placeholder
ret = changegroup.addchangegroup(op.repo, cg, 'bundle2', 'bundle2')
op.records.add('changegroup', {'return': ret})
if op.reply is not None:
# This is definitely not the final form of this
# return. But one need to start somewhere.
part = op.reply.newpart('reply:changegroup', mandatory=False)
part.addparam('in-reply-to', str(inpart.id), mandatory=False)
part.addparam('return', '%i' % ret, mandatory=False)
assert not inpart.read()
_remotechangegroupparams = tuple(['url', 'size', 'digests'] +
['digest:%s' % k for k in util.DIGESTS.keys()])
@parthandler('remote-changegroup', _remotechangegroupparams)
def handleremotechangegroup(op, inpart):
"""apply a bundle10 on the repo, given an url and validation information
    All the information about the remote bundle to import is given as
    parameters. The parameters include:
- url: the url to the bundle10.
- size: the bundle10 file size. It is used to validate what was
retrieved by the client matches the server knowledge about the bundle.
- digests: a space separated list of the digest types provided as
parameters.
- digest:<digest-type>: the hexadecimal representation of the digest with
that name. Like the size, it is used to validate what was retrieved by
the client matches what the server knows about the bundle.
When multiple digest types are given, all of them are checked.
"""
try:
raw_url = inpart.params['url']
except KeyError:
raise util.Abort(_('remote-changegroup: missing "%s" param') % 'url')
parsed_url = util.url(raw_url)
if parsed_url.scheme not in capabilities['remote-changegroup']:
raise util.Abort(_('remote-changegroup does not support %s urls') %
parsed_url.scheme)
try:
size = int(inpart.params['size'])
except ValueError:
raise util.Abort(_('remote-changegroup: invalid value for param "%s"')
% 'size')
except KeyError:
raise util.Abort(_('remote-changegroup: missing "%s" param') % 'size')
digests = {}
for typ in inpart.params.get('digests', '').split():
param = 'digest:%s' % typ
try:
value = inpart.params[param]
except KeyError:
raise util.Abort(_('remote-changegroup: missing "%s" param') %
param)
digests[typ] = value
real_part = util.digestchecker(url.open(op.ui, raw_url), size, digests)
# Make sure we trigger a transaction creation
#
# The addchangegroup function will get a transaction object by itself, but
# we need to make sure we trigger the creation of a transaction object used
# for the whole processing scope.
op.gettransaction()
import exchange
cg = exchange.readbundle(op.repo.ui, real_part, raw_url)
if not isinstance(cg, changegroup.cg1unpacker):
raise util.Abort(_('%s: not a bundle version 1.0') %
util.hidepassword(raw_url))
ret = changegroup.addchangegroup(op.repo, cg, 'bundle2', 'bundle2')
op.records.add('changegroup', {'return': ret})
if op.reply is not None:
# This is definitely not the final form of this
# return. But one need to start somewhere.
part = op.reply.newpart('reply:changegroup')
part.addparam('in-reply-to', str(inpart.id), mandatory=False)
part.addparam('return', '%i' % ret, mandatory=False)
try:
real_part.validate()
except util.Abort, e:
raise util.Abort(_('bundle at %s is corrupted:\n%s') %
(util.hidepassword(raw_url), str(e)))
assert not inpart.read()
@parthandler('reply:changegroup', ('return', 'in-reply-to'))
def handlereplychangegroup(op, inpart):
ret = int(inpart.params['return'])
replyto = int(inpart.params['in-reply-to'])
op.records.add('changegroup', {'return': ret}, replyto)
@parthandler('check:heads')
def handlecheckheads(op, inpart):
"""check that head of the repo did not change
This is used to detect a push race when using unbundle.
This replaces the "heads" argument of unbundle."""
h = inpart.read(20)
heads = []
    while len(h) == 20:
|
# -*- coding: utf-8 -*-
# Copyright Martin Manns
# Distributed under the terms of the GNU General Public License
# --------------------------------------------------------------------
# pyspread is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyspread is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyspread. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------
"""
Workflows for pyspread
"""
from ast import literal_eval
from base64 import b85encode
import bz2
from contextlib import contextmanager
import csv
from itertools import cycle
import io
import os.path
from pathlib import Path
from shutil import move
from tempfile import NamedTemporaryFile
from typing import Iterable, Tuple
from PyQt5.QtCore import (
Qt, QMimeData, QModelIndex, QBuffer, QRect, QRectF, QItemSelectionModel)
from PyQt5.QtGui import QTextDocument, QImage, QPainter, QBrush, QPen
from PyQt5.QtWidgets import (
QApplication, QMessageBox, QInputDialog, QStyleOptionViewItem, QTableView,
QUndoCommand)
try:
from PyQt5.QtSvg import QSvgGenerator
except ImportError:
QSvgGenerator = None
try:
import matplotlib
import matplotlib.figure as matplotlib_figure
except ImportError:
matplotlib = None
matplotlib_figure = None
try:
import pyspread.commands as commands
from pyspread.dialogs \
import (DiscardChangesDialog, FileOpenDialog, GridShapeDialog,
FileSaveDialog, ImageFileOpenDialog, ChartDialog,
CellKeyDialog, FindDialog, ReplaceDialog, CsvFileImportDialog,
CsvImportDialog, CsvExportDialog, CsvExportAreaDialog,
FileExportDialog)
from pyspread.interfaces.pys import PysReader, PysWriter
from pyspread.lib.attrdict import AttrDict
from pyspread.lib.hashing import sign, verify
from pyspread.lib.selection import Selection
from pyspread.lib.typechecks import is_svg, check_shape_validity
from pyspread.lib.csv import csv_reader, convert
from pyspread.lib.file_helpers import \
(linecount, file_progress_gen, ProgressDialogCanceled)
from pyspread.model.model import CellAttribute
except ImportError:
import commands
from dialogs \
import (DiscardChangesDialog, FileOpenDialog, GridShapeDialog,
FileSaveDialog, ImageFileOpenDialog, ChartDialog,
CellKeyDialog, FindDialog, ReplaceDialog, CsvFileImportDialog,
CsvImportDialog, CsvExportDialog, CsvExportAreaDialog,
FileExportDialog)
from interfaces.pys import PysReader, PysWriter
from lib.attrdict import AttrDict
from lib.hashing import sign, verify
from lib.selection import Selection
from lib.typechecks import is_svg, check_shape_validity
from lib.csv import csv_reader, convert
from lib.file_helpers import \
(linecount, file_progress_gen, ProgressDialogCanceled)
from model.model import CellAttribute
class Workflows:
"""Workflow container class"""
    cell2dialog = {} # Stores active chart dialogs
def __init__(self, main_window):
self.main_window = main_window
@contextmanager
def busy_cursor(self):
""":class:`~contextlib.contextmanager` that displays a busy cursor"""
QApplication.setOverrideCursor(Qt.WaitCursor)
yield
QApplication.restoreOverrideCursor()
@contextmanager
def prevent_updates(self):
""":class:`~contextlib.contextmanager` sets the prevent_updates state
The prevent_updates state prevents updates in main_window.grid.setData
"""
self.main_window.prevent_updates = True
yield
self.main_window.prevent_updates = False
def handle_changed_since_save(func, *args, **kwargs):
"""Decorator to handle changes since last saving the document
If changes are present then a dialog is displayed that asks if the
changes shall be discarded.
- If the user selects `Cancel` then `func` is not executed.
- If the user selects `Save` then the file is saved and `func` is
executed.
- If the user selects `Discard` then the file is not saved and `func`
is executed.
If no changes are present then `func` is directly executed.
After executing `func`, :func:`reset_changed_since_save` and
`update_main_window_title` are called.
"""
def function_wrapper(self, *args, **kwargs):
"""Check changes and display and handle the dialog"""
if self.main_window.settings.changed_since_save:
choice = DiscardChangesDialog(self.main_window).choice
if choice is None:
return
elif not choice:
# We try to save to a file
if self.file_save() is False:
# File could not be saved --> Abort
return
try:
func(self, *args, **kwargs)
except TypeError:
func(self) # No args accepted
self.reset_changed_since_save()
self.update_main_window_title()
return function_wrapper
def reset_changed_since_save(self):
"""Sets changed_since_save to False and updates the window title"""
# Change the main window filepath state
self.main_window.settings.changed_since_save = False
def update_main_window_title(self):
"""Change the main window title to reflect the current file name"""
# Get the current filepath
filepath = self.main_window.settings.last_file_input_path
if filepath == Path.home():
title = "pyspread"
else:
title = "{filename} - pyspread".format(filename=filepath.name)
self.main_window.setWindowTitle(title)
@handle_changed_since_save
def file_new(self):
"""File new workflow"""
# Get grid shape from user
old_shape = self.main_window.grid.model.code_array.shape
shape = GridShapeDialog(self.main_window, old_shape).shape
# Check if shape is valid
if shape is None:
return
try:
check_shape_validity(shape, self.main_window.settings.maxshape)
except ValueError as err:
self.main_window.statusBar().showMessage('Error: ' + str(err))
return
# Set current cell to upper left corner
for grid in self.main_window.grids:
grid.current = 0, 0, 0
            # Select upper left cell because initial selection behaves strangely
grid.reset_selection()
# Reset grid
self.main_window.grid.model.reset()
# Delete old filepath
self.main_window.settings.last_file_input_path = Path.home()
# Set new shape
self.main_window.grid.model.shape = shape
# Update cell spans and zoom because this is unsupported by the model
for grid in self.main_window.grids:
with grid.undo_resizing_row():
with grid.undo_resizing_column():
grid.update_cell_spans()
grid.update_zoom()
# Update index widgets
grid.update_index_widgets()
# Set current cell to upper left corner
for grid in self.main_window.grids:
grid.current = 0, 0, 0
# Change the main window filepath state
self.main_window.settings.changed_since_save = False
# Update macro editor
self.main_window.macro_panel.update()
# Exit safe mode
self.main_window.safe_mode = False
def count_file_lines(self, filepath: Path):
"""Returns line count of file in filepath
:param filepath: Path of file to be analyzed
"""
try:
with open(filepath, 'rb') as infile:
return linecount(infile)
except OSError as error:
self.main_window.statusBar().showMessage(str(error))
return
def filepath_open(self, filepath: Path):
"""Workflow for opening a file if a filepath is known
:param filepath: Path of file to be opened
"""
grid = self.main_window.grid
code_array = grid.model.code_array
        # Get number of lines for progress dialog
filelines = self.count_file_lines(filepath)
if not filelines: # May not be None or 0
return
# Reset grid
grid.model.reset()
# Reset macro editor
self.main_window.macro_panel.macro_editor.clear()
# Is the file signed properly ?
self.main_window.safe_mode = True
signature_key = self.main_window.settings.signature_key
try:
with open(filepath, "rb") as infile:
signature_path = filepath.with_suffix(filepath.suffix + '.sig')
with open(signature_path, "rb") as sigfile:
self.main_window.safe_mode = not verify(infile.read(),
sigfile.read(),
signature_key)
except OSError:
self.main_window.safe_mode = True
# File compression handling
if filepath.suffix == ".pysu":
fopen = open
else:
fopen = bz2.open
# Process events before showing the modal progress dialog
QApplication.instance().processEvents()
# Load file into grid
title = "File open progress"
label = "Opening {}...".format(filepath.name)
try:
with fopen(filepath, "rb") as infile:
reader = PysReader(infile, code_array)
try:
for i, _ in file_progress_gen(self.main_window, reader,
title, label, filelines):
pass
except Exception as error:
grid.model.reset()
self.main_window.statusBar().showMessage(str(error))
self.main_window.safe_mode = False
return
except ProgressDialogCanceled:
msg = "File open stopped by user at line {}.".format(i)
self.main_window.statusBar().showMessage(msg)
grid.model.reset()
self.main_window.safe_mode = False
return
except Exception as err:
# A lot may go wrong with a malformed pys file, including OSError
msg_tpl = "Error opening file {filepath}: {err}."
msg = msg_tpl.format(filepath=filepath, err=err)
self.main_window.statusBar().showMessage(msg)
# Reset grid
grid.model.reset()
self.main_window.safe_mode = False
return
# Explicitly set the grid shape
shape = code_array.shape
grid.model.shape = shape
# Update cell spans and zoom because this is unsupported by the model
for grid in self.main_window.grids:
with grid.undo_resizing_row():
with grid.undo_resizing_column():
grid.update_cell_spans()
grid.update_zoom()
# Update index widgets
grid.update_index_widgets()
# Select upper left cell because of initial selection oddities
grid.reset_selection()
# Change the main window last input directory state
self.main_window.settings.last_file_input_path = filepath
self.main_window.settings.last_file_output_path = filepath
# Change the main window filepath state
self.main_window.settings.changed_since_save = False
# Update macro editor
self.main_window.macro_panel.update()
# Add to file history
self.main_window.settings.add_to_file_history(filepath.as_posix())
# Update recent files in the file menu
self.main_window.menuBar().file_menu.history_submenu.update()
return filepath
@handle_changed_since_save
def file_open(self):
"""File open workflow"""
# Get filepath from user
dial = FileOpenDialog(self.main_window)
if not dial.file_path:
return # Cancel pressed
filepath = Path(dial.file_path).with_suffix(dial.suffix)
self.filepath_open(filepath)
@handle_changed_since_save
def file_open_recent(self, filepath: Path):
"""File open recent workflow
:param filepath: Path of file to be opened
"""
self.filepath_open(Path(filepath))
def sign_file(self, filepath: Path):
"""Signs filepath if not in :attr:`model.model.DataArray.safe_mode`
:param filepath: Path of file to be signed
"""
if self.main_window.safe_mode:
msg = "File saved but not signed because it is unapproved."
self.main_window.statusBar().showMessage(msg)
return
signature_key = self.main_window.settings.signature_key
try:
with open(filepath, "rb") as infile:
signature = sign(infile.read(), signature_key)
except OSError as err:
msg = "Error signing file: {}".format(err)
self.main_window.statusBar().showMessage(msg)
return
if signature is None or not signature:
msg = 'Error signing file. '
self.main_window.statusBar().showMessage(msg)
return
signature_path = filepath.with_suffix(filepath.suffix + '.sig')
try:
with open(signature_path, 'wb') as signfile:
signfile.write(signature)
msg = "File saved and signed."
except OSError as err:
msg_tpl = "Error signing file {filepath}: {err}."
msg = msg_tpl.format(filepath=filepath, err=err)
self.main_window.statusBar().showMessage(msg)
def _save(self, filepath: Path):
"""Save filepath using chosen_filter
Compresses save file if filepath.suffix is `.pys`
:param filepath: Path of file to be saved
"""
code_array = self.main_window.grid.model.code_array
# Process events before showing the modal progress dialog
QApplication.instance().processEvents()
# Save grid to temporary file
title = "File save progress"
label = "Saving {}...".format(filepath.name)
with NamedTemporaryFile(delete=False) as tempfile:
filename = tempfile.name
try:
pys_writer = PysWriter(code_array)
try:
for _, line in file_progress_gen(
self.main_window, pys_writer, title, label,
len(pys_writer)):
line = bytes(line, "utf-8")
if filepath.suffix == ".pys":
line = bz2.compress(line)
tempfile.write(line)
except ProgressDialogCanceled:
msg = "File save stopped by user."
self.main_window.statusBar().showMessage(msg)
tempfile.delete = True # Delete incomplete tmpfile
return False
except (OSError, ValueError) as err:
tempfile.delete = True
QMessageBox.critical(self.main_window, "Error saving file",
str(err))
return False
try:
if filepath.exists() and not os.access(filepath, os.W_OK):
raise PermissionError("No write access to {}".format(filepath))
move(filename, filepath)
except OSError as err:
# Move failed, e.g. the temporary file is missing or the target is not writable
QMessageBox.critical(self.main_window, "Error saving file",
str(err))
return False
# Change the main window filepath state
self.main_window.settings.changed_since_save = False
# Set the current filepath
self.main_window.settings.last_file_output_path = filepath
# Change the main window title
window_title = "{filename} - pyspread".format(filename=filepath.name)
self.main_window.setWindowTitle(window_title)
# Add to file history
self.main_window.settings.add_to_file_history(filepath.as_posix())
# Update recent files in the file menu
self.main_window.menuBar().file_menu.history_submenu.update()
self.sign_file(filepath)
def file_save(self):
"""File save workflow"""
filepath = self.main_window.settings.last_file_output_path
if filepath.suffix and self._save(filepath) is not False:
return
# Fall back to the save-as workflow: either the file has never
# been saved before or saving failed, e.g. because the user
# aborted the dialog or a write error occurred
return self.file_save_as()
def file_save_as(self):
"""File save as workflow"""
# Get filepath from user
dial = FileSaveDialog(self.main_window)
if not dial.file_path:
return False # Cancel pressed
filepath = Path(dial.file_path)
# Extend filepath suffix if needed
if filepath.suffix != dial.suffix:
filepath = filepath.with_suffix(dial.suffix)
return self._save(filepath)
def file_import(self):
"""Import csv files"""
# Get filepath from user
dial = CsvFileImportDialog(self.main_window)
if not dial.file_path:
return # Cancel pressed
filepath = Path(dial.file_path)
self._csv_import(filepath)
def _csv_import(self, filepath: Path):
"""Import csv from filepath
:param filepath: Path of file to be imported
"""
filelines = self.count_file_lines(filepath)
if not filelines: # May not be None or 0
title = "CSV Import Error"
text = "File {} seems to be empty.".format(filepath)
QMessageBox.warning(self.main_window, title, text)
return
# Store file import path for next time importing a file
self.main_window.settings.last_file_import_path = filepath
digest_types = self.main_window.settings.digest_types
csv_dlg = CsvImportDialog(self.main_window, filepath,
digest_types=digest_types)
if not csv_dlg.exec():
return
self.main_window.settings.digest_types = csv_dlg.digest_types
dialect = csv_dlg.dialect
digest_types = csv_dlg.digest_types
try:
keep_header = dialect.hasheader and dialect.keepheader
except AttributeError:
keep_header = False
grid = self.main_window.focused_grid
row, column, _ = current = grid.current
model = grid.model
rows, columns, tables = model.shape
# Dialog accepted, now check if grid is large enough
csv_rows = filelines
if dialect.hasheader and not dialect.keepheader:
csv_rows -= 1
csv_columns = csv_dlg.csv_table.model.columnCount()
max_rows, max_columns = self.main_window.settings.maxshape[:2]
if csv_rows > rows - row or csv_columns > columns - column:
if csv_rows + row > max_rows or csv_columns + column > max_columns:
# Required grid size is too large
text_tpl = "The csv file {} does not fit into the grid.\n " +\
"\nIt has {} rows and {} columns. Counting from " +\
"the current cell, {} rows and {} columns would " +\
"be needed, which exeeds the maximum shape of " +\
"{} rows and {} columns. Data that does not fit " +\
"inside the grid is discarded.\n \nDo you want " +\
"to increase the grid size so that as much data " +\
"from the csv file as possible fits in?"
text = text_tpl.format(filepath, csv_rows, csv_columns,
rows-row, columns-column, max_rows,
max_columns)
else:
# Shall we resize the grid?
text_tpl = \
"The csv file {} does not fit into the grid.\n \n" +\
"It has {} rows and {} columns. Counting from the " +\
"current cell, only {} rows and {} columns remain for " +\
"CSV data.\n \nData that does not fit inside the grid " +\
"is discarded.\n \nDo you want to increase the grid " +\
"size so that all csv file data fits in?"
text = text_tpl.format(filepath, csv_rows, csv_columns,
rows-row, columns-column)
title = "CSV Content Exceeds Grid Shape"
choices = QMessageBox.No | QMessageBox.Yes | QMessageBox.Cancel
default_choice = QMessageBox.No
choice = QMessageBox.question(self.main_window, title, text,
choices, default_choice)
if choice == QMessageBox.Yes:
# Resize grid
target_rows = min(max_rows, max(csv_rows + row, rows))
target_columns = min(max_columns,
max(csv_columns + column, columns))
self._resize_grid((target_rows, target_columns, tables))
rows = target_rows
columns = target_columns
elif choice == QMessageBox.Cancel:
return
# Now fill the grid
description_tpl = "Import from csv file {} at cell {}"
description = description_tpl.format(filepath, current)
command = None
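# All SetCellCode commands created below are merged via mergeWith so
# that the entire csv import becomes a single undoable step.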
title = "csv import progress"
label = "Importing {}...".format(filepath.name)
try:
with open(filepath, newline='', encoding='utf-8') as csvfile:
try:
reader = csv_reader(csvfile, dialect)
for i, line in file_progress_gen(self.main_window, reader,
title, label, filelines):
if row + i >= rows:
break
for j, ele in enumerate(line):
if column + j >= columns:
break
if digest_types is None:
code = str(ele)
elif i == 0 and keep_header:
code = repr(ele)
else:
code = convert(ele, digest_types[j])
index = model.index(row + i, column + j)
_command = commands.SetCellCode(code, model, index,
description)
try:
command.mergeWith(_command)
except AttributeError:
command = _command
except (TypeError, ValueError) as error:
title = "CSV Import Error"
text_tpl = "Error importing csv file {path}.\n \n" + \
"{errtype}: {error}"
text = text_tpl.format(path=filepath,
errtype=type(error).__name__,
error=error)
QMessageBox.warning(self.main_window, title, text)
return
except ProgressDialogCanceled:
title = "CSV Import Stopped"
text = "Import stopped by user at line {}.".format(i)
QMessageBox.warning(self.main_window, title, text)
return
except Exception as error:
# A lot may go wrong with malformed csv files, including OSError
title = "CSV Import Error"
text_tpl = "Error importing csv file {path}.\n \n" +\
"{errtype}: {error}"
text = text_tpl.format(path=filepath, errtype=type(error).__name__,
error=error)
QMessageBox.warning(self.main_window, title, text)
return
with self.main_window.entry_line.disable_updates():
with self.busy_cursor():
with self.prevent_updates():
self.main_window.undo_stack.push(command)
def file_export(self):
"""Export csv and svg files"""
# Determine which filters are available
filters_list = ["CSV (*.csv)"]
grid = self.main_window.focused_grid
current = grid.current
code_array = grid.model.code_array
res = code_array[current]
if isinstance(res, QImage):
filters_list.append("JPG of current cell (*.jpg)")
if isinstance(res, QImage) \
or isinstance(res, matplotlib.figure.Figure):
filters_list.append("PNG of current cell (*.png)")
if isinstance(res, matplotlib.figure.Figure):
filters_list.append("SVG of current cell (*.svg)")
# Get filepath from user
dial = FileExportDialog(self.main_window, filters_list)
if not dial.file_path:
return # Cancel pressed
filepath = Path(dial.file_path)
# Store file export path for next time exporting a file
self.main_window.settings.last_file_export_path = filepath
if "CSV" in dial.selected_filter:
self._csv_export(filepath)
return
# Extend filepath suffix if needed
if filepath.suffix != dial.suffix:
filepath = filepath.with_suffix(dial.suffix)
if "JPG" in dial.selected_filter:
if isinstance(res, QImage):
self._qimage_export(str(filepath), file_format="jpg")
if "PNG" in dial.selected_filter:
if isinstance(res, QImage):
self._qimage_export(str(filepath), file_format="png")
elif isinstance(res, matplotlib.figure.Figure):
self._matplotlib_export(filepath, file_format="png")
elif "SVG" in dial.selected_filter:
if isinstance(res, matplotlib.figure.Figure):
self._matplotlib_export(filepath, file_format="svg")
def _csv_export(self, filepath: Path):
"""Export to csv file filepath
:param filepath: Path of file to be exported
"""
grid = self.main_window.focused_grid
# Get area for csv export
area = CsvExportAreaDialog(self.main_window, grid,
title="Csv export area").area
if area is None:
return
code_array = grid.model.code_array
table = grid.table
csv_data = code_array[area.top: area.bottom + 1,
area.left: area.right + 1, table]
csv_dlg = CsvExportDialog(self.main_window, area)
if not csv_dlg.exec():
return
try:
with open(filepath, "w", newline='', encoding='utf-8') as csvfile:
writer = csv.writer(csvfile, dialect=csv_dlg.dialect)
writer.writerows(csv_data)
except OSError as error:
self.main_window.statusBar().showMessage(str(error))
def _qimage_export(self, filepath: Path, file_format: str):
"""Export to png file filepath
:param filepath: Path of file to be exported
:param file_format: File format to be exported, e.g. png
"""
grid = self.main_window.focused_grid
code_array = grid.model.code_array
qimage = code_array[grid.current]
try:
if not qimage.save(filepath, file_format):
msg = "Could not save {}".format(filepath)
self.main_window.statusBar().showMessage(msg)
except Exception as error:
self.main_window.statusBar().showMessage(str(error))
def _matplotlib_export(self, filepath: Path, file_format: str):
"""Export to svg file filepath
:param filepath: Path of file to be exported
:param file_format: File format to be exported, e.g. png or svg
"""
if matplotlib is None:
# matplotlib is not installed
return
grid = self.main_window.focused_grid
code_array = grid.model.code_array
figure = code_array[grid.current]
try:
figure.savefig(filepath, format=file_format)
except Exception as error:
self.main_window.statusBar().showMessage(str(error))
@contextmanager
def print_zoom(self, zoom: float = 1.0):
"""Decorator for tasks that have to take place in standard zoom
:param zoom: Print zoom factor
"""
grid = self.main_window.focused_grid
__zoom = grid.zoom
grid.zoom = zoom
yield
grid.zoom = __zoom
def get_paint_rows(self, top: int, bottom: int) -> Iterable[int]:
"""Iterator of rows to paint
:param top: First row to paint
:param bottom: Last row to paint
"""
grid = self.main_window.focused_grid
rows = grid.model.shape[0]
top = max(0, min(rows - 1, top))
bottom = max(0, min(rows - 1, bottom))
if top == -1:
top = 0
if bottom == -1:
bottom = grid.model.shape[0]
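# Note: top and bottom were already clamped to >= 0 above, so the two
# -1 checks above can no longer trigger.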
return range(top, bottom + 1)
def get_paint_columns(self, left: int, right: int) -> Iterable[int]:
"""Iterator of columns to paint
:param left: First column to paint
:param right: Last column to paint
"""
grid = self.main_window.focused_grid
columns = grid.model.shape[1]
left = max(0, min(columns - 1, left))
right = max(0, min(columns - 1, right))
if left == -1:
left = 0
if right == -1:
right = grid.model.shape[1]
return range(left, right + 1)
def get_paint_tables(self, first: int, last: int) -> Iterable[int]:
"""Iterator of tables to paint
:param first: First table to paint
:param last: Last table to paint
"""
grid = self.main_window.focused_grid
tables = grid.model.shape[2]
first = max(0, min(tables - 1, first))
last = max(0, min(tables - 1, last))
if first == -1:
first = 0
if last == -1:
last = grid.model.shape[2]
return range(first, last + 1)
def get_total_height(self, top: int, bottom: int) -> float:
"""Total height of paint_rows
:param top: First row to evaluate
:param bottom: Last row to evaluate
"""
grid = self.main_window.focused_grid
rows = self.get_paint_rows(top, bottom)
return sum(grid.rowHeight(row) for row in rows)
def get_total_width(self, left: int, right: int) -> float:
"""Total height of paint_columns
:param left: First column to evaluate
:param right: Last column to evaluate
"""
grid = self.main_window.focused_grid
columns = self.get_paint_columns(left, right)
return sum(grid.columnWidth(column) for column in columns)
def paint(self, painter: QPainter, option: QStyleOptionViewItem,
paint_rect: QRectF, rows: Iterable[int], columns: Iterable[int]):
"""Grid paint workflow for printing and svg export
:param painter: Painter with which the grid is drawn
:param option: Style option for rendering
:param paint_rect: Rectangle, which is drawn at the grid borders
:param rows: Rows to be painted
:param columns: Columns to be painted
"""
grid = self.main_window.focused_grid
code_array = grid.model.code_array
cell_attributes = code_array.cell_attributes
x_offset = grid.columnViewportPosition(0)
y_offset = grid.rowViewportPosition(0)
max_width = 0
max_height = 0
for row in rows:
for column in columns:
key = row, column, grid.table
merging_cell = cell_attributes.get_merging_cell(key)
if merging_cell is None \
or merging_cell[0] == row and merging_cell[1] == column:
idx = grid.model.index(row, column)
visual_rect = grid.visualRect(idx)
x = max(0, visual_rect.x() - x_offset)
y = max(0, visual_rect.y() - y_offset)
width = visual_rect.width()
if visual_rect.x() - x_offset < 0:
width += visual_rect.x() - x_offset
height = visual_rect.height()
if visual_rect.y() - y_offset < 0:
height += visual_rect.y() - y_offset
option.rect = QRect(x, y, width, height)
option.rectf = QRectF(x, y, width, height)
max_width = max(max_width, x + width)
max_height = max(max_height, y + height)
# painter.setClipRect(option.rectf)
option.text = code_array(key)
option.widget = grid
grid.itemDelegate().paint(painter, option, idx)
# Draw outer boundary rect
painter.setPen(QPen(QBrush(Qt.gray), 2))
painter.drawRect(paint_rect)
@handle_changed_since_save
def file_quit(self):
"""Program exit workflow"""
self.main_window.settings.save()
QApplication.instance().quit()
# Edit menu
def delete(self, description_tpl: str = "Delete selection {}"):
"""Delete cells in selection
:param description_tpl: Description template for `QUndoCommand`
"""
grid = self.main_window.focused_grid
model = grid.model
selection = grid.selection
description = description_tpl.format(selection)
for row, column in selection.cell_generator(model.shape):
key = row, column, grid.table
if not grid.model.code_array.cell_attributes[key]['locked']:
# Pop item
index = model.index(row, column, QModelIndex())
command = commands.SetCellCode(None, model, index, description)
self.main_window.undo_stack.push(command)
def edit_cut(self):
"""Edit -> Cut workflow"""
self.edit_copy()
self.delete(description_tpl="Cut selection {}")
def edit_copy(self):
"""Edit -> Copy workflow
Copies selected grid code to clipboard
"""
grid = self.main_window.focused_grid
table = grid.table
selection = grid.selection
bbox = selection.get_grid_bbox(grid.model.shape)
(top, left), (bottom, right) = bbox
data = []
for row in range(top, bottom + 1):
data.append([])
for column in range(left, right + 1):
if (row, column) in selection:
code = grid.model.code_array((row, column, table))
if code is None:
code = ""
code = code.replace("\n", "\u000C") # Replace LF by FF
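# Form feed is used because "\n" and "\t" already delimit rows and
# cells in the clipboard text; the paste workflows reverse this mapping.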
else:
code = ""
data[-1].append(code)
data_string = "\n".join("\t".join(line) for line in data)
clipboard = QApplication.clipboard()
clipboard.setText(data_string)
def _copy_results_current(self, grid: QTableView):
"""Copy cell results for the current cell
:param grid: Main grid
"""
current = grid.current
data = grid.model.code_array[current]
if data is None:
return
clipboard = QApplication.clipboard()
# Get renderer for current cell
renderer = grid.model.code_array.cell_attributes[current].renderer
if renderer == "text":
clipboard.setText(repr(data))
elif renderer == "image":
if isinstance(data, QImage):
clipboard.setImage(data)
else:
# We may have an svg image here
try:
svg_bytes = bytes(data)
except TypeError:
svg_bytes = bytes(data, encoding='utf-8')
if is_svg(svg_bytes):
mime_data = QMimeData()
mime_data.setData("image/svg+xml", svg_bytes)
clipboard.setMimeData(mime_data)
elif renderer == "markup":
mime_data = QMimeData()
mime_data.setHtml(str(data))
# Also copy data as plain text
doc = QTextDocument()
doc.setHtml(str(data))
mime_data.setText(doc.toPlainText())
clipboard.setMimeData(mime_data)
elif renderer == "matplotlib" and isinstance(data,
matplotlib_figure.Figure):
# We copy both svg and png representations to the clipboard
svg_filelike = io.BytesIO()
png_filelike = io.BytesIO()
data.savefig(svg_filelike, format="svg")
data.savefig(png_filelike, format="png")
svg_bytes = (svg_filelike.getvalue())
png_image = QImage().fromData(png_filelike.getvalue())
mime_data = QMimeData()
mime_data.setData("image/svg+xml", svg_bytes)
mime_data.setImageData(png_image)
clipboard.setMimeData(mime_data)
def _copy_results_selection(self, grid: QTableView):
"""Copy repr of selected cells result objects to the clipboard
:param grid: Main grid
"""
def repr_nn(ele):
"""repr which returns '' if ele is None"""
if ele is None:
return ''
return repr(ele)
table = grid.table
selection = grid.selection
bbox = selection.get_grid_bbox(grid.model.shape)
(top, left), (bottom, right) = bbox
data = grid.model.code_array[top:bottom+1, left:right+1, table]
data_string = "\n".join("\t".join(map(repr_nn, line)) for line in data)
clipboard = QApplication.clipboard()
clipboard.setText(data_string)
def edit_copy_results(self):
"""Edit -> Copy results workflow
If a selection is present then repr of selected grid cells result
objects are copied to the clipboard.
If no selection is present, the current cell results are copied to the
clipboard. This can be plain text, html, a png image or an svg image.
"""
grid = self.main_window.focused_grid
if grid.has_selection():
self._copy_results_selection(grid)
else:
self._copy_results_current(grid)
def _paste_to_selection(self, selection: Selection, data: str):
"""Pastes data into grid filling the selection
:param selection: Grid cell selection for pasting
:param data: Clipboard text
"""
grid = self.main_window.focused_grid
model = grid.model
(top, left), (bottom, right) = selection.get_grid_bbox(model.shape)
table = grid.table
code_array = grid.model.code_array
undo_stack = self.main_window.undo_stack
description_tpl = "Paste clipboard to {}"
description = description_tpl.format(selection)
command = None
paste_gen = (line.split("\t") for line in data.split("\n"))
for row, line in enumerate(cycle(paste_gen)):
paste_row = row + top
if paste_row > bottom or (paste_row, 0, table) not in code_array:
break
for column, value in enumerate(cycle(line)):
paste_column = column + left
paste_key = paste_row, paste_column, table
if (paste_key in code_array
and paste_column <= right):
if ((paste_row, paste_column) in selection and not
code_array.cell_attributes[paste_key].locked):
index = model.index(paste_row, paste_column,
QModelIndex())
# Preserve line breaks
value = value.replace("\u000C", "\n")
cmd = commands.SetCellCode(value, model, index,
description)
if command is None:
command = cmd
else:
command.mergeWith(cmd)
else:
break
undo_stack.push(command)
def _paste_to_current(self, data: str):
"""Pastes data into grid starting from the current cell
:param data: Clipboard text
"""
grid = self.main_window.focused_grid
model = grid.model
top, left, table = current = grid.current
code_array = grid.model.code_array
undo_stack = self.main_window.undo_stack
description_tpl = "Paste clipboard starting from cell {}"
description = description_tpl.format(current)
command = None
paste_gen = (line.split("\t") for line in data.split("\n"))
for row, line in enumerate(paste_gen):
paste_row = row + top
if (paste_row, 0, table) not in code_array:
break
for column, value in enumerate(line):
paste_column = column + left
if (paste_row, paste_column, table) in code_array:
index = model.index(paste_row, paste_column, QModelIndex())
# Preserve line breaks
value = value.replace("\u000C", "\n")
cmd = commands.SetCellCode(value, model, index,
description)
if command is None:
command = cmd
else:
command.mergeWith(cmd)
else:
break
undo_stack.push(command)
def edit_paste(self):
"""Edit -> Paste workflow
Pastes text clipboard data
If no selection is present, data is pasted starting at the current
cell. If a selection is present, all data is pasted if the selection
is smaller. If the selection is larger, the data is repeated to fill it.
"""
grid = self.main_window.focused_grid
clipboard = QApplication.clipboard()
data = clipboard.text()
if data:
# Change the main window filepath state
self.main_window.settings.changed_since_save = True
with self.busy_cursor():
if grid.has_selection():
self._paste_to_selection(grid.selection, data)
else:
self._paste_to_current(data)
def _paste_svg(self, svg: str, index: QModelIndex):
"""Pastes svg image into cell
:param svg: SVG data
:param index: Target cell index
"""
grid = self.main_window.focused_grid
codelines = svg.splitlines()
codelines[0] = '"""' + codelines[0]
codelines[-1] = codelines[-1] + '"""'
code = "\n".join(codelines)
model = grid.model
description = "Insert svg image into cell {}".format(index)
grid.on_image_renderer_pressed(True)
with self.main_window.entry_line.disable_updates():
command = commands.SetCellCode(code, model, index, description)
self.main_window.undo_stack.push(command)
def _paste_image(self, image_data: bytes, index: QModelIndex):
"""Pastes svg image into cell
:param image_data: Raw image data. May be anything that QImage handles.
:param index: Target cell index
"""
def gen_chunk(string: str, length: int) -> Iterable[str]:
"""Generator for chunks of string
:param string: String to be chunked
:param length: Chunk length
"""
for i in range(0, len(string), length):
yield string[i:i+length]
grid = self.main_window.focused_grid
repr_image_data = repr(b85encode(bz2.compress(image_data)))
newline = "'\n+b'"
image_data_str = newline.join(gen_chunk(repr_image_data, 8000))
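# repr() yields a bytes literal such as b'...'; joining 8000-character
# chunks with "'\n+b'" produces adjacent literals added together, e.g.
#   b'chunk1'
#   +b'chunk2'
# which keeps each generated code line at a manageable length.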
code_lines = [
"data = bz2.decompress(base64.b85decode(",
image_data_str,
"))",
"qimg = QImage()",
"QImage.loadFromData(qimg, data)",
"qimg",
]
code = "\n".join(code_lines)
model = grid.model
description = "Insert image into cell {}".format(index)
grid.on_image_renderer_pressed(True)
with self.main_window.entry_line.disable_updates():
command = commands.SetCellCode(code, model, index, description)
self.main_window.undo_stack.push(command)
def edit_paste_as(self):
"""Pastes clipboard into one cell using a user specified mime type"""
grid = self.main_window.focused_grid
model = grid.model
# The mimetypes that are supported by pyspread
mimetypes = ("image", "text/html", "text/plain")
clipboard = QApplication.clipboard()
formats = clipboard.mimeData().formats()
items = [fmt for fmt in formats if any(m in fmt for m in mimetypes)]
if not items:
return
elif len(items) == 1:
item = items[0]
else:
item, ok = QInputDialog.getItem(self.main_window, "Paste as",
"Choose mime type", items,
current=0, editable=False)
if not ok:
return
row, column, _ = current = grid.current # Target cell key
description_tpl = "Paste {} from clipboard into cell {}"
description = description_tpl.format(item, current)
index = model.index(row, column, QModelIndex())
mime_data = clipboard.mimeData()
if item == "image/svg+xml":
# SVG Image
if mime_data:
svg = mime_data.data("image/svg+xml")
self._paste_svg(str(svg, encoding='utf-8'), index)
elif "image" in item and mime_data.hasImage():
# Bitmap Image
image = clipboard.image()
buffer = QBuffer()
buffer.open(QBuffer.ReadWrite)
image.save(buffer, "PNG")
buffer.seek(0)
image_data = buffer.readAll()
buffer.close()
self._paste_image(image_data, index)
elif item == "text/html" and mime_data.hasHtml():
# HTML content
html = mime_data.html()
command = commands.SetCellCode(html, model, index, description)
self.main_window.undo_stack.push(command)
grid.on_markup_renderer_pressed(True)
elif item == "text/plain":
# Normal code
code = clipboard.text()
if code:
command = commands.SetCellCode(code, model, index, description)
self.main_window.undo_stack.push(command)
else:
# Unknown mime type
return NotImplemented
def edit_find(self):
"""Edit -> Find workflow, opens FindDialog"""
find_dialog = FindDialog(self.main_window)
find_dialog.show()
find_dialog.raise_()
find_dialog.activateWindow()
def _get_next_match(self, find_dialog: FindDialog,
start_key: Tuple[int, int, int] = None):
"""Returns tuple of find string and next matching cell key
:param find_dialog: Find dialog from which the search origins
:param start_key: Start search at given key
"""
grid = self.main_window.focused_grid
findnextmatch = grid.model.code_array.findnextmatch
find_editor = find_dialog.search_text_editor
find_string = find_editor.text()
if start_key is not None:
pass
elif find_dialog.from_start_checkbox.isChecked():
start_key = 0, 0, grid.table
elif find_dialog.backward_checkbox.isChecked():
start_key = grid.row - 1, grid.column, grid.table
else:
start_key = grid.row + 1, grid.column, grid.table
match = findnextmatch(start_key, find_string,
up=find_dialog.backward_checkbox.isChecked(),
word=find_dialog.word_checkbox.isChecked(),
case=find_dialog.case_checkbox.isChecked(),
regexp=find_dialog.regex_checkbox.isChecked(),
results=find_dialog.results_checkbox.isChecked())
return find_string, match
def _display_match_msg(self, find_string: str, next_match: str,
regexp: str):
"""Displays find match message in statusbar
:param find_string: Message component
:param next_match: Message component
:param regexp: Message component
"""
str_name = "Regular expression" if regexp else "String"
msg_tpl = "{str_name} {find_string} found in cell {next_match}."
msg = msg_tpl.format(str_name=str_name,
find_string=find_string,
next_match=next_match)
self.main_window.statusBar().showMessage(msg)
def find_dialog_on_find(self, find_dialog: FindDialog):
"""Edit -> Find workflow, after pressing find button in FindDialog
:param find_dialog: Find dialog of origin
"""
grid = self.main_window.focused_grid
find_string, next_match = self._get_next_match(find_dialog)
if next_match:
grid.current = next_match
regexp = find_dialog.regex_checkbox.isChecked()
self._display_match_msg(find_string, next_match, regexp)
if find_dialog.from_start_checkbox.isChecked():
find_dialog.from_start_checkbox.setChecked(False)
def edit_find_next(self):
"""Edit -> Find next workflow"""
grid = self.main_window.focused_grid
findnextmatch = grid.model.code_array.findnextmatch
find_editor = self.main_window.find_toolbar.find_editor
find_string = find_editor.text()
if find_editor.up:
start_key = grid.row - 1, grid.column, grid.table
else:
start_key = grid.row + 1, grid.column, grid.table
next_match = findnextmatch(start_key, find_string,
up=find_editor.up,
word=find_editor.word,
case=find_editor.case,
regexp=find_editor.regexp,
results=find_editor.results)
if next_match:
grid.current = next_match
self._display_match_msg(find_string, next_match,
find_editor.regexp)
def edit_replace(self):
"""Edit -> Replace workflow, opens ReplaceDialog"""
find_dialog = ReplaceDialog(self.main_window)
find_dialog.show()
find_dialog.raise_()
find_dialog.activateWindow()
def _get_replace_command(self, next_match: Tuple[int, int, int],
find_string: str,
replace_string: str,
max_: int = 1,
description: str = None) -> QUndoCommand:
"""Returns SetCellCode command for replace operations
:param next_match: Key of next matching cell
:param find_string: String to find
:param replace_string: Replacement string
:param max_: Maximum number of replace actions, -1 is unlimited
:param description: Forced command description string
"""
model = self.main_window.focused_grid.model
old_code = model.code_array(next_match)
new_code = old_code.replace(find_string, replace_string, max_)
if description is None:
description_tpl = "Replaced {old} with {new} in cell {key}."
description = description_tpl.format(old=old_code, new=new_code,
key=next_match)
index = model.index(*next_match[:2])
return commands.SetCellCode(new_code, model, index, description)
def replace_dialog_on_replace(self, replace_dialog: ReplaceDialog,
toggled: bool = False,
max_: int = 1) -> bool:
"""Edit -> Replace workflow when pushing Replace in ReplaceDialog
Returns True if there is a match otherwise False
:param replace_dialog: Replace dialog of origin
:param toggled: Replace dialog toggle state
:param max_: Maximum number of replace actions, -1 is unlimited
"""
grid = self.main_window.focused_grid
find_string, next_match = self._get_next_match(replace_dialog)
replace_string = replace_dialog.replace_text_editor.text()
if next_match:
command = self._get_replace_command(next_match, find_string,
replace_string, max_=max_)
self.main_window.undo_stack.push(command)
grid.current = next_match
self.main_window.statusBar().showMessage(command.description)
if replace_dialog.from_start_checkbox.isChecked():
replace_dialog.from_start_checkbox.setChecked(False)
return True
return False
def replace_dialog_on_replace_all(self, replace_dialog: ReplaceDialog):
"""Edit -> Replace workflow when pushing ReplaceAll in ReplaceDialog
:param replace_dialog: Replace dialog of origin
"""
replace_string = replace_dialog.replace_text_editor.text()
command = None
replaced = []
next_match = 0, 0, 0
with self.busy_cursor():
with self.main_window.entry_line.disable_updates():
with self.prevent_updates():
while True:
# TODO: ABORT ON USER REQUEST
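# The search restarts one row below the previous match; the loop ends
# when no further match exists or a match repeats, which happens when
# the replacement text still contains the search string.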
find_string, next_match = \
self._get_next_match(replace_dialog,
start_key=(next_match[0]+1,
next_match[1],
next_match[2]))
if not next_match or next_match in replaced:
break
replaced.append(next_match)
msg = "Replace all {} by {}".format(find_string,
replace_string)
_command = self._get_replace_command(next_match,
find_string,
replace_string,
max_=-1,
description=msg)
if command is None:
command = _command
else:
command.mergeWith(_command)
self.main_window.undo_stack.push(command)
msg_tpl = "{} replaced by {} in {} cells."
msg = msg_tpl.format(find_string, replace_string, len(replaced))
self.main_window.statusBar().showMessage(msg)
def edit_resize(self):
"""Edit -> Resize workflow"""
grid = self.main_window.focused_grid
# Get grid shape from user
old_shape = grid.model.code_array.shape
title = "Resize grid"
shape = GridShapeDialog(self.main_window, old_shape, title=title).shape
self._resize_grid(shape)
def _resize_grid(self, shape: Tuple[int, int, int]):
"""Resize grid
:param shape: New grid shape
"""
grid = self.main_window.focused_grid
old_shape = grid.model.code_array.shape
# Check if shape is valid
try:
check_shape_validity(shape, self.main_window.settings.maxshape)
except ValueError as err:
self.main_window.statusBar().showMessage('Error: ' + str(err))
return
grid.current = 0, 0, 0
description = "Resize grid to {}".format(shape)
with self.main_window.entry_line.disable_updates():
command = commands.SetGridSize(grid, old_shape, shape, description)
self.main_window.undo_stack.push(command)
# Select upper left cell because initial selection behaves strangely
grid.reset_selection()
# View menu
def view_goto_cell(self):
"""View -> Go to cell workflow"""
grid = self.main_window.focused_grid
# Get cell key from user
shape = grid.model.shape
key = CellKeyDialog(self.main_window, shape).key
if key is not None:
grid.current = key
# Format menu
def format_copy_format(self):
"""Copies the format of the selected cells to the Clipboard
Cells are shifted so that the top left bbox corner is at 0,0
"""
def remove_tabu_keys(attrs: AttrDict):
"""Remove keys that are not copied from attr
:param attrs: Attribute dict that holds cell attributes
"""
tabu_attrs = "merge_area", "frozen"
for tabu_attr in tabu_attrs:
try:
attrs.pop(tabu_attr)
except KeyError:
pass
grid = self.main_window.focused_grid
code_array = grid.model.code_array
cell_attributes = code_array.cell_attributes
# Cell attributes
new_cell_attributes = []
selection = grid.selection
# Format content is shifted so that the top left corner is 0,0
(top, left), (bottom, right) = \
selection.get_grid_bbox(grid.model.shape)
table_cell_attributes = cell_attributes.for_table(grid.table)
for __selection, _, attrs in table_cell_attributes:
new_selection = selection & __selection
if new_selection:
# We do not copy merge areas and frozen cell states
remove_tabu_keys(attrs)
new_shifted_selection = new_selection.shifted(-top, -left)
cell_attribute = new_shifted_selection.parameters, attrs
new_cell_attributes.append(cell_attribute)
ca_repr = bytes(repr(new_cell_attributes), encoding='utf-8')
clipboard = QApplication.clipboard()
mime_data = QMimeData()
mime_data.setData("application/x-pyspread-cell-attributes", ca_repr)
clipboard.setMimeData(mime_data)
def format_paste_format(self):
"""Pastes cell formats
Pasting starts at cursor or at top left bbox corner
"""
clipboard = QApplication.clipboard()
mime_data = clipboard.mimeData()
grid = self.main_window.focused_grid
model = grid.model
row, column, table = grid.current
if "application/x-pyspread-cell-attributes" not in mime_data.formats():
return
cas_data = mime_data.data("application/x-pyspread-cell-attributes")
cas_data_str = str(cas_data, encoding='utf-8')
cas = literal_eval(cas_data_str)
|
#!/usr/bin/env python
# py2/3 compatibility
from __future__ import print_function
try:
from builtins import range, bytes
from itertools import izip, chain
except ImportError:
from itertools import chain
izip = zip
# standard lib imports
import os
import glob
import time
import shutil
import pickle
from collections import Counter
# third party imports
import numpy as np
import pandas as pd
import ipyrad
from numba import njit
from .utils import IPyradError, clustdealer, splitalleles, chroms2ints
from .utils import BTS, GETCONS, DCONS # , bcomp
# suppress the terrible h5 warning
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import h5py
# classes
class Step7:
def __init__(self, data, force, ipyclient):
self.data = data
self.force = force
self.ipyclient = ipyclient
self.lbview = self.ipyclient.load_balanced_view()
self.data.isref = bool("ref" in self.data.params.assembly_method)
self.data.ispair = bool("pair" in self.data.params.datatype)
# returns samples in the order we want them in the outputs
self.print_headers()
self.samples = self.get_subsamples()
self.setup_dirs()
self.get_chunksize()
# dict mapping of samples to padded names for loci file aligning.
self.data.snames = [i.name for i in self.samples]
self.data.pnames, self.data.snppad = self.get_padded_names()
# output file formats to produce ('l' is required).
self.formats = set(['l']).union(
set(self.data.params.output_formats))
def run(self):
# split clusters into bits.
self.split_clusters()
# get filter and snp info on edge trimmed data.
# write to chunks for building output files and save dimensions.
self.remote_process_chunks()
# write stats file while counting nsnps and nbases.
self.collect_stats()
self.store_file_handles()
# write loci and alleles outputs (parallelized on 3 engines)
self.remote_build_arrays_and_write_loci()
# send conversion jobs from array files to engines
self.remote_write_outfiles()
# send jobs to build vcf
if 'v' in self.formats:
# throttle job to avoid memory errors based on catg size
self.remote_fill_depths()
self.remote_build_vcf()
# cleanup
if os.path.exists(self.data.tmpdir):
shutil.rmtree(self.data.tmpdir)
def print_headers(self):
if self.data._cli:
self.data._print(
"\n{}Step 7: Filtering and formatting output files "
.format(self.data._spacer)
)
def get_subsamples(self):
"get subsamples for this assembly. All must have been in step6"
# bail out if no samples ready
if not hasattr(self.data.stats, "state"):
raise IPyradError("No samples ready for step 7")
# get samples from the database file
if not os.path.exists(self.data.clust_database):
raise IPyradError("You must first complete step6.")
with open(self.data.clust_database, 'r') as inloci:
dbsamples = inloci.readline()[1:].strip().split(",@")
# samples are in this assembly but not database (raise error)
nodb = set(self.data.samples).difference(set(dbsamples))
if nodb:
raise IPyradError(MISSING_SAMPLE_IN_DB.format(nodb))
# samples in database not in this assembly, that's OK, you probably
# branched to drop some samples.
# samples in populations file that are not in this assembly. Raise
# an error, it's probably a typo and should be corrected.
poplists = [i[1] for i in self.data.populations.values()]
popset = set(chain(*poplists))
badpop = popset.difference(set(self.data.samples))
if badpop:
raise IPyradError(BADPOP_SAMPLES.format(badpop))
# output files already exist for this assembly. Raise
# error unless using the force flag to prevent overwriting.
if not self.force:
_outdir = os.path.join(
self.data.params.project_dir,
"{}_outfiles".format(self.data.name),
)
_outdir = os.path.realpath(_outdir)
if os.path.exists(os.path.join(_outdir,
"{}.loci".format(self.data.name),
)):
raise IPyradError(
"Step 7 results exist for this Assembly. Use force to overwrite.")
# if ref init a new sample for reference if including
if self.data.params.assembly_method == 'reference':
ref = ipyrad.core.sample.Sample("reference")
samples = [ref] + sorted(
list(set(self.data.samples.values())),
key=lambda x: x.name)
return samples
else:
samples = sorted(
list(set(self.data.samples.values())),
key=lambda x: x.name)
return samples
def setup_dirs(self):
"Create temp h5 db for storing filters and depth variants"
# reset outfiles paths
for key in self.data.outfiles:
self.data.outfiles[key] = ""
# make new output directory
self.data.dirs.outfiles = os.path.join(
self.data.params.project_dir,
"{}_outfiles".format(self.data.name),
)
self.data.dirs.outfiles = os.path.realpath(self.data.dirs.outfiles)
if os.path.exists(self.data.dirs.outfiles):
shutil.rmtree(self.data.dirs.outfiles)
if not os.path.exists(self.data.dirs.outfiles):
os.makedirs(self.data.dirs.outfiles)
# stats output handle
self.data.stats_files.s7 = os.path.abspath(
os.path.join(
self.data.dirs.outfiles,
"{}_stats.txt".format(self.data.name),
)
)
# make tmpdir directory
self.data.tmpdir = os.path.join(
self.data.dirs.outfiles,
"tmpdir",
)
if os.path.exists(self.data.tmpdir):
shutil.rmtree(self.data.tmpdir)
if not os.path.exists(self.data.tmpdir):
os.makedirs(self.data.tmpdir)
# make new database files
self.data.seqs_database = os.path.join(
self.data.dirs.outfiles,
self.data.name + ".seqs.hdf5",
)
self.data.snps_database = os.path.join(
self.data.dirs.outfiles,
self.data.name + ".snps.hdf5",
)
for dbase in [self.data.snps_database, self.data.seqs_database]:
if os.path.exists(dbase):
os.remove(dbase)
def get_chunksize(self):
"get nloci and ncpus to chunk and distribute work across processors"
# this file is inherited from step 6 to allow step7 branching.
with open(self.data.clust_database, 'r') as inloci:
# skip header
inloci.readline()
# get nraw loci
self.nraws = sum(1 for i in inloci if i == "//\n") // 2
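# Each locus in the clust database is terminated by two "//" lines,
# hence the division by 2.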
# chunk to approximately 4 chunks per core
self.ncpus = len(self.ipyclient.ids)
self.chunksize = sum([
(self.nraws // (self.ncpus * 4)),
(self.nraws % (self.ncpus * 4)),
])
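# For example, with 4 engines and 1000 raw loci the target is roughly
# 4 chunks per core: chunksize = 1000 // 16 + 1000 % 16 = 62 + 8 = 70.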
def get_padded_names(self):
# get longest name
longlen = max(len(i) for i in self.data.snames)
# Padding distance between name and seq.
padding = 5
# add pad to names
pnames = {
name: "{}{}".format(name, " " * (longlen - len(name) + padding))
for name in self.data.snames
}
snppad = "//" + " " * (longlen - 2 + padding)
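# e.g. a longest name of 9 characters pads every name to 14 characters
# and the snpstring indicator line starts with "//" plus 12 spaces.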
return pnames, snppad
def store_file_handles(self):
# always produce a .loci file + whatever they ask for.
testformats = list(self.formats)
for outf in testformats:
# if it requires a pop file and they don't have one then skip
# and write the warning to the expected file, to prevent an
# annoying message every time if you don't have a pops file, but
# still to be transparent about skipping some files. This caused
# me some real amount of pain, like "why isnt' the treemix file
# being created, fudkckkk!!!1" And then like 10 minutes later, oh
# yeah, no pops file, fml. 3/2020 iao.
if (outf in ("t", "m")) and (not self.data.populations):
outfile = os.path.join(
self.data.dirs.outfiles,
self.data.name + OUT_SUFFIX[outf][0])
with open(outfile, 'w') as out:
out.write(POPULATION_REQUIRED.format(outf))
# remove format from the set
self.formats.discard(outf)
continue
else:
# store handle to data object
for ending in OUT_SUFFIX[outf]:
# store
self.data.outfiles[ending[1:]] = os.path.join(
self.data.dirs.outfiles,
self.data.name + ending)
def collect_stats(self):
"Collect results from Processor and write stats file."
# organize stats into dataframes
ftable = pd.DataFrame(
columns=["total_filters", "applied_order", "retained_loci"],
index=[
"total_prefiltered_loci",
"filtered_by_rm_duplicates",
"filtered_by_max_indels",
"filtered_by_max_SNPs",
"filtered_by_max_shared_het",
"filtered_by_min_sample", # "filtered_by_max_alleles",
"total_filtered_loci"],
)
# load pickled dictionaries into a dict
pickles = glob.glob(os.path.join(self.data.tmpdir, "*.p"))
pdicts = {}
for pkl in pickles:
with open(pkl, 'rb') as inp:
pdicts[pkl.rsplit("-", 1)[-1][:-2]] = pickle.load(inp)
# join dictionaries into global stats
afilts = np.concatenate([i['filters'] for i in pdicts.values()])
lcovs = Counter({})
scovs = Counter({})
cvar = Counter({})
cpis = Counter({})
nbases = 0
for lcov in [i['lcov'] for i in pdicts.values()]:
lcovs.update(lcov)
for scov in [i['scov'] for i in pdicts.values()]:
scovs.update(scov)
for var in [i['var'] for i in pdicts.values()]:
cvar.update(var)
for pis in [i['pis'] for i in pdicts.values()]:
cpis.update(pis)
for count in [i['nbases'] for i in pdicts.values()]:
nbases += count
# make into nice DataFrames
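# afilts columns follow Processor.filterlabels
# (dups, maxind, maxvar, maxshared, minsamp). "total_filters" counts
# every locus a filter flags, while "applied_order" only counts loci not
# already removed by an earlier filter (hence the ~mask selections below).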
ftable.iloc[0, :] = (0, 0, self.nraws)
# filter rm dups
ftable.iloc[1, 0:2] = afilts[:, 0].sum()
ftable.iloc[1, 2] = ftable.iloc[0, 2] - ftable.iloc[1, 1]
mask = afilts[:, 0]
# filter max indels
ftable.iloc[2, 0] = afilts[:, 1].sum()
ftable.iloc[2, 1] = afilts[~mask, 1].sum()
ftable.iloc[2, 2] = ftable.iloc[1, 2] - ftable.iloc[2, 1]
mask = afilts[:, 0:2].sum(axis=1).astype(np.bool_)
# filter max snps
ftable.iloc[3, 0] = afilts[:, 2].sum()
ftable.iloc[3, 1] = afilts[~mask, 2].sum()
ftable.iloc[3, 2] = ftable.iloc[2, 2] - ftable.iloc[3, 1]
mask = afilts[:, 0:3].sum(axis=1).astype(np.bool_)
# filter max shared H
ftable.iloc[4, 0] = afilts[:, 3].sum()
ftable.iloc[4, 1] = afilts[~mask, 3].sum()
ftable.iloc[4, 2] = ftable.iloc[3, 2] - ftable.iloc[4, 1]
mask = afilts[:, 0:4].sum(axis=1).astype(np.bool_)
# filter minsamp
ftable.iloc[5, 0] = afilts[:, 4].sum()
ftable.iloc[5, 1] = afilts[~mask, 4].sum()
ftable.iloc[5, 2] = ftable.iloc[4, 2] - ftable.iloc[5, 1]
mask = afilts[:, 0:4].sum(axis=1).astype(np.bool_)
ftable.iloc[6, 0] = ftable.iloc[:, 0].sum()
ftable.iloc[6, 1] = ftable.iloc[:, 1].sum()
ftable.iloc[6, 2] = ftable.iloc[5, 2]
# save stats to the data object
self.data.stats_dfs.s7_filters = ftable
self.data.stats_dfs.s7_samples = pd.DataFrame(
pd.Series(scovs, name="sample_coverage"))
## get locus cov and sums
lrange = range(1, len(self.samples) + 1)
covs = pd.Series(lcovs, name="locus_coverage", index=lrange)
start = self.data.params.min_samples_locus - 1
sums = pd.Series(
{i: np.sum(covs[start:i]) for i in lrange},
name="sum_coverage",
index=lrange)
self.data.stats_dfs.s7_loci = pd.concat([covs, sums], axis=1)
# fill pis to match var
for i in cvar:
if not cpis.get(i):
cpis[i] = 0
## get SNP distribution
sumd = {}
sump = {}
for i in range(max(cvar.keys()) + 1):
sumd[i] = np.sum([i * cvar[i] for i in range(i + 1)])
sump[i] = np.sum([i * cpis[i] for i in range(i + 1)])
self.data.stats_dfs.s7_snps = pd.concat([
pd.Series(cvar, name="var"),
pd.Series(sumd, name="sum_var"),
pd.Series(cpis, name="pis"),
pd.Series(sump, name="sum_pis"),
],
axis=1
)
# trim SNP distribution to exclude unobserved endpoints
snpmax = np.where(
np.any(
self.data.stats_dfs.s7_snps.loc[:, ["var", "pis"]] != 0, axis=1
)
)[0]
if snpmax.size:
snpmax = snpmax.max()
self.data.stats_dfs.s7_snps = (
self.data.stats_dfs.s7_snps.loc[:snpmax])
## store dimensions for array building
self.nloci = ftable.iloc[6, 2]
self.nbases = nbases
self.nsnps = self.data.stats_dfs.s7_snps["sum_var"].max()
self.ntaxa = len(self.samples)
# write to file
with open(self.data.stats_files.s7, 'w') as outstats:
print(STATS_HEADER_1, file=outstats)
self.data.stats_dfs.s7_filters.to_string(buf=outstats)
print(STATS_HEADER_2, file=outstats)
self.data.stats_dfs.s7_samples.to_string(buf=outstats)
print(STATS_HEADER_3, file=outstats)
self.data.stats_dfs.s7_loci.to_string(buf=outstats)
print(STATS_HEADER_4, file=outstats)
self.data.stats_dfs.s7_snps.to_string(buf=outstats)
print("\n\n\n## Final Sample stats summary", file=outstats)
statcopy = self.data.stats.copy()
statcopy.state = 7
statcopy['loci_in_assembly'] = self.data.stats_dfs.s7_samples
statcopy.to_string(buf=outstats)
print("\n\n\n## Alignment matrix statistics:", file=outstats)
# bail out here if no loci were found
if not self.nloci:
raise IPyradError("No loci passed filters.")
def split_clusters(self):
with open(self.data.clust_database, 'rb') as clusters:
# skip header
clusters.readline()
# build iterator
pairdealer = izip(*[iter(clusters)] * 2)
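# izip(*[iter(clusters)] * 2) yields consecutive, non-overlapping pairs
# of lines; clustdealer consumes pairs until `chunksize` loci have been
# collected (loci are delimited by "//" line pairs).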
# grab a chunk of clusters
idx = 0
while 1:
# if an engine is available pull off a chunk
try:
done, chunk = clustdealer(pairdealer, self.chunksize)
except IndexError:
raise IPyradError(
"clust_database formatting error in %s" % chunk)
# write to tmpdir and increment counter
if chunk:
chunkpath = os.path.join(
self.data.tmpdir,
"chunk-{}".format(idx),
)
with open(chunkpath, 'wb') as outfile:
outfile.write(b"//\n//\n".join(chunk))
idx += 1
# break on final chunk
if done:
break
def remote_process_chunks(self):
"""
Calls process_chunk() function in parallel.
"""
start = time.time()
printstr = ("applying filters ", "s7")
rasyncs = {}
jobs = glob.glob(os.path.join(self.data.tmpdir, "chunk-*"))
jobs = sorted(jobs, key=lambda x: int(x.rsplit("-")[-1]))
for jobfile in jobs:
args = (self.data, self.chunksize, jobfile)
rasyncs[jobfile] = self.lbview.apply(process_chunk, *args)
# iterate until all chunks are processed
while 1:
# get and enter results into hdf5 as they come in
ready = [rasyncs[i].ready() for i in rasyncs]
self.data._progressbar(len(ready), sum(ready), start, printstr)
time.sleep(0.5)
if len(ready) == sum(ready):
self.data._print("")
break
# write stats
for job in rasyncs:
if not rasyncs[job].successful():
rasyncs[job].get()
def remote_build_arrays_and_write_loci(self):
"""
Calls write_loci_and_alleles(), fill_seq_array() and fill_snp_array().
"""
# start loci concatenating job on a remote
start = time.time()
printstr = ("building arrays ", "s7")
rasyncs = {}
args1 = (self.data, self.ntaxa, self.nbases, self.nloci)
args2 = (self.data, self.ntaxa, self.nsnps)
# print(self.nbases)
# fill with filtered loci chunks from Processor
rasyncs[0] = self.lbview.apply(write_loci_and_alleles, self.data)
rasyncs[1] = self.lbview.apply(fill_seq_array, *args1)
# fill with filtered loci chunks but also applies min_samples_SNP
rasyncs[2] = self.lbview.apply(fill_snp_array, *args2)
# track progress.
while 1:
ready = [rasyncs[i].ready() for i in rasyncs]
self.data._progressbar(len(ready), sum(ready), start, printstr)
time.sleep(0.5)
if len(ready) == sum(ready):
self.data._print("")
break
# check for errors
for job in rasyncs:
if not rasyncs[job].successful():
rasyncs[job].get()
def remote_write_outfiles(self):
"""
Calls convert_outputs() in parallel.
"""
start = time.time()
printstr = ("writing conversions ", "s7")
rasyncs = {}
for outf in self.formats:
rasyncs[outf] = self.lbview.apply(
convert_outputs, *(self.data, outf))
# iterate until all chunks are processed
while 1:
ready = [rasyncs[i].ready() for i in rasyncs]
self.data._progressbar(len(ready), sum(ready), start, printstr)
time.sleep(0.5)
if len(ready) == sum(ready):
self.data._print("")
break
# write stats
for job in rasyncs:
if not rasyncs[job].successful():
try:
rasyncs[job].get()
except Exception as inst:
# Allow one file to fail without breaking all step 7
# but print out the error and some info
print(inst)
def remote_fill_depths(self):
"""
Call fill_vcf_depths() in parallel.
"""
start = time.time()
printstr = ("indexing vcf depths ", "s7")
rasyncs = {}
for sample in self.data.samples.values():
if not sample.name == "reference":
rasyncs[sample.name] = self.lbview.apply(
fill_vcf_depths, *(self.data, self.nsnps, sample))
# iterate until all chunks are processed
while 1:
ready = [rasyncs[i].ready() for i in rasyncs]
self.data._progressbar(len(ready), sum(ready), start, printstr)
time.sleep(0.5)
if len(ready) == sum(ready):
self.data._print("")
break
# write stats
for job in rasyncs:
if not rasyncs[job].successful():
rasyncs[job].get()
def remote_build_vcf(self):
"""
Calls build_vcf() in parallel.
"""
start = time.time()
printstr = ("writing vcf output ", "s7")
rasync = self.lbview.apply(build_vcf, self.data)
# iterate until all chunks are processed
while 1:
ready = rasync.ready()
self.data._progressbar(1, ready, start, printstr)
time.sleep(0.5)
if ready:
self.data._print("")
break
# write stats
if not rasync.successful():
rasync.get()
# ------------------------------------------------------------
# Classes initialized and run on remote engines.
# ------------------------------------------------------------
def process_chunk(data, chunksize, chunkfile):
# process chunk writes to files and returns proc with features.
proc = Processor(data, chunksize, chunkfile)
proc.run()
# check for variants or set max to 0
try:
mvar = max([i for i in proc.var if proc.var[i]])
except ValueError:
mvar = 0
try:
mpis = max([i for i in proc.pis if proc.pis[i]])
except ValueError:
mpis = 0
# shorten dictionaries
proc.var = {i: j for (i, j) in proc.var.items() if i <= mvar}
proc.pis = {i: j for (i, j) in proc.pis.items() if i <= mpis}
# write process stats to a pickle file for collating later.
# We have to write stats for each process, even if it returns
# no loci in order for the filtering stats to make sense.
# https://github.com/dereneaton/ipyrad/issues/358
out = {
"filters": proc.filters,
"lcov": proc.lcov,
"scov": proc.scov,
"var": proc.var,
"pis": proc.pis,
"nbases": proc.nbases
}
with open(proc.outpickle, 'wb') as outpickle:
pickle.dump(out, outpickle)
##############################################################
class Processor(object):
def __init__(self, data, chunksize, chunkfile):
"""
Takes a chunk of aligned loci and (1) applies filters to it;
(2) gets edges; (3) builds the snpstring; (4) returns chunk and stats;
and (5) writes the filtered chunk and its stats to files.
"""
# init data
self.data = data
self.chunksize = chunksize
self.chunkfile = chunkfile
self.isref = self.data.isref
self.ispair = self.data.ispair
self.minsamp = self.data.params.min_samples_locus
# Minsamp is calculated _before_ the reference sequence is removed
# and so if we want the minsamp param to be honored as it is written
# in the params file we need to _add_ 1 to the value, so that when
# the ref is excluded the minsamp value will be accurate.
# If the ref is _included_ then it counts toward minsample and no
# adjustment is necessary.
if self.isref:
if self.data.hackersonly.exclude_reference:
self.minsamp += 1
# filters (dups, minsamp, maxind, maxall, maxvar, maxshared)
self.filters = np.zeros((self.chunksize, 5), dtype=np.bool_)
self.filterlabels = (
'dups',
'maxind',
'maxvar',
'maxshared',
'minsamp',
)
# (R1>, <R1, R2>, <R2)
self.edges = np.zeros((self.chunksize, 4), dtype=np.uint16)
# check filter settings
self.fmaxsnps = self.data.params.max_SNPs_locus
if isinstance(self.fmaxsnps, tuple):
self.fmaxsnps = self.fmaxsnps[0]
if isinstance(self.fmaxsnps, int):
self.fmaxsnps = 0.10 # backwards compatibility make as a float
self.fmaxhet = self.data.params.max_shared_Hs_locus
if isinstance(self.fmaxhet, tuple):
self.fmaxhet = self.fmaxhet[0]
# TODO: This backwards compatibility is hard coded. Maybe better to
# just raise an error here, or really during parsing of the params
# file is best.
if isinstance(self.fmaxhet, int):
self.fmaxhet = 0.5 # backwards compatibility make as a float
self.maxinds = self.data.params.max_Indels_locus
if isinstance(self.maxinds, tuple):
self.maxinds = self.maxinds[0] # backwards compatibility
# store stats on sample coverage and locus coverage
self.scov = {i: 0 for i in self.data.snames}
self.lcov = {i: 0 for i in range(1, len(self.data.snames) + 1)}
self.var = {i: 0 for i in range(5000)}
self.pis = {i: 0 for i in range(5000)}
self.nbases = 0
# tmp outfile list and filename
self.outlist = []
self.outfile = self.chunkfile + '.loci'
self.outpickle = self.chunkfile + '.p'
self.outarr = self.chunkfile + '.npy'
# open a generator to the chunks
self.io = open(self.chunkfile, 'rb')
self.loci = enumerate(iter(self.io.read().split(b"//\n//\n")))
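# Loci are separated by the "//\n//\n" delimiter written by
# Step7.split_clusters, so splitting on it yields one entry per locus.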
# filled in each chunk
self.names = []
self.nidxs = []
self.aseqs = []
self.useqs = []
def next_locus(self):
self.names = []
self.nidxs = []
self.aseqs = []
self.useqs = []
# advance locus to next, parse names and seqs
self.iloc, lines = next(self.loci)
lines = lines.decode().strip().split("\n")
for line in lines:
if line[0] == ">":
name, nidx = line[1:].rsplit("_", 1)
self.names.append(name)
self.nidxs.append(nidx)
else:
self.aseqs.append(list(bytes(line.encode())))
self.useqs.append(list(bytes(line.upper().encode())))
# filter to include only samples in this assembly
mask = np.array([i in self.data.snames for i in self.names])
self.names = np.array(self.names)[mask].tolist()
if not self.filter_dups():
# [ref] store consens read start position as mapped to ref
self.nidxs = np.array(self.nidxs)[mask].tolist()
self.useqs = np.array(self.useqs)[mask, :].astype(np.uint8)
self.aseqs = np.array(self.aseqs)[mask, :].astype(np.uint8)
def run(self):
# iterate through loci in the chunk
while 1:
try:
self.next_locus()
except StopIteration:
break
# fill filter 0
if self.filter_dups():
continue
# apply filters
edges = Edges(self.data, self.useqs)
edges.get_edges()
self.edges[self.iloc] = edges.edges
# fill filter 4
self.filter_minsamp_pops()
self.filters[self.iloc, 4] += int(edges.bad)
# trim edges, need to use uppered seqs for maxvar & maxshared
edg = self.edges[self.iloc]
ublock = self.useqs[:, edg[0]:edg[3]]
ablock = self.aseqs[:, edg[0]:edg[3]]
# filter if there are any empty samples after trimming
self.filters[self.iloc, 4] += np.sum(np.all(ublock == 45, axis=1))
# bail out of locus now if it is already bad...
if self.filters[self.iloc].sum():
continue
# [denovo]: store shift of left edge start position from
# alignment, this position is needed for pulling depths in VCF.
# [ref]: nidx string will be updated in to_locus() with edg
self.masked = None
if not self.isref:
# what is the leftmost consens edge (not -)
ishift = [
np.where(self.aseqs[i] != 45)[0].min()
for i in range(self.aseqs.shape[0])
]
# fill nidxs with nidxs and shift info
inidxs = []
for idx, (i, j) in enumerate(zip(self.nidxs, ishift)):
# add to ishift if trimmed region contains indels
indshift = (self.aseqs[idx, j:edges.edges[0]] == 45).size
inidxs.append("{}-{}".format(i, j + indshift))
self.nidxs = inidxs
# mask insert in denovo data
self.aseqs[:, edges.edges[1]:edges.edges[2]] = 110 # n
self.useqs[:, edges.edges[1]:edges.edges[2]] = 78 # N
# for is-ref we need to mask the insert between pairs
else:
if self.ispair and self.data.params.min_samples_locus > 1:
inserts = np.all(ublock[1:, :] == 78, axis=0)
self.masked = ublock[:, np.invert(inserts)]
# apply filters on edge trimmed reads
self.filter_maxindels(ublock)
# get snpstring on trimmed reads
if self.isref and self.data.hackersonly.exclude_reference:
snparr = self.get_snpsarrs(ublock, True)
else:
snparr = self.get_snpsarrs(ublock)
self.filter_maxvars(ublock, snparr)
# apply filters on edge trimmed reads
self.filter_maxshared(ublock)
# store stats for the locus that passed filtering
if not self.filters[self.iloc, :].sum():
# do sample and locus counters
for name in self.names:
self.scov[name] += 1
# advance locus counter
if self.isref and self.data.hackersonly.exclude_reference:
self.lcov[self.useqs.shape[0] - 1] += 1
else:
self.lcov[self.useqs.shape[0]] += 1
# do SNP distribution counter
if self.masked is None:
self.nbases += ublock.shape[1]
else:
self.nbases += self.masked.shape[1]
self.var[snparr[:, :].sum()] += 1
self.pis[snparr[:, 1].sum()] += 1
# write to .loci string
locus = self.to_locus(ablock, snparr, edg)
self.outlist.append(locus)
# If no loci survive filtering then don't write the files
if np.fromiter(self.lcov.values(), dtype=int).sum() > 0:
# write the chunk to tmpdir
with open(self.outfile, 'w') as outchunk:
outchunk.write("\n".join(self.outlist) + "\n")
# thin edgelist to filtered loci and write to array
mask = np.invert(self.filters.sum(axis=1).astype(np.bool_))
np.save(self.outarr, self.edges[mask, 0])
# close file handle
self.io.close()
def to_locus(self, block, snparr, edg):
"write chunk to a loci string"
# store as a list
locus = []
# convert snparrs to snpstrings
snpstring = "".join([
"-" if snparr[i, 0] else "*" if snparr[i, 1] else " "
for i in range(len(snparr))
])
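# Worked example (illustrative): snparr rows [[True, False], [False, True],
# [False, False]] produce the snpstring "-* ", i.e. "-" for a variable site,
# "*" for a parsimony-informative site, and " " for an invariant site.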
# get nidx string for getting vcf depths to match SNPs
if self.isref:
# get ref position from nidxs
refpos = ":".join(self.nidxs[0].rsplit(":", 2)[-2:])
# trim ref position string for edge trims
chrom, pos = refpos.split(":")
ostart, end = pos.split("-")
start = int(ostart) + edg[0]
end = start + (edg[3] - edg[0])
# get consens hit indexes and start positions
nidbits = []
for bit in self.nidxs[1:]:
# handle multiple consens merged
bkey = []
for cbit in bit.split(";"):
cidx, _, pos = cbit.split(":")
# the stored value is the sample's consens start position minus
# ostart (the untrimmed reference start), i.e. how far ahead of the
# reference start the consens read begins.
posplus = int(pos.split("-")[0]) - int(ostart)
bkey.append("{}:{}".format(cidx, posplus))
nidbits.append("-".join(bkey))
# put ref back into string and append consens hits
refpos = "{}:{}-{}".format(chrom, start, end)
nidbits = [refpos] + nidbits
nidxstring = ",".join(nidbits)
# denovo stores the read start position in the nidx string
else:
nidxstring = ",".join(self.nidxs)
# if not paired data (with an insert)
for idx, name in enumerate(self.names):
locus.append(
"{}{}".format(
self.data.pnames[name],
block[idx, :].tostring().decode())
)
locus.append("{}{}|{}|".format(
self.data.snppad, snpstring, nidxstring))
return "\n".join(locus)
def filter_dups(self):
if len(set(self.names)) < len(self.names):
self.filters[self.iloc, 0] = 1
return True
return False
def filter_minsamp_pops(self):
"filter by minsamp or by minsamp x populations"
# default: no population information
if not self.data.populations:
if len(self.names) < self.minsamp: # data.params.min_samples_locus:
# store locus filter
self.filters[self.iloc, 4] = 1
# return True
# return False
# use populations
else:
minfilters = []
for pop in self.data.populations:
samps = self.data.populations[pop][1]
minsamp = self.data.populations[pop][0]
if len(set(samps).intersection(set(self.names))) < minsamp:
minfilters.append(pop)
if any(minfilters):
self.filters[self.iloc, 4] = 1
# return True
# return False
def filter_maxindels(self, ublock):
"max size of internal indels. Denovo vs. Ref, single versus paired."
# get max indels for read1, read2
inds = maxind_numba(ublock)
if inds > self.maxinds:
self.filters[self.iloc, 1] = 1
# return True
# return False
def filter_maxvars(self, ublock, snpstring):
# mask insert area
if self.masked is not None:
if snpstring.sum() > (self.masked.shape[1] * self.fmaxsnps):
self.filters[self.iloc, 2] = 1
# return True
# use full locus
else:
if snpstring.sum() > (ublock.shape[1] * self.fmaxsnps):
self.filters[self.iloc, 2] = 1
# return True
# return False
def filter_maxshared(self, ublock):
nhs = count_maxhet_numba(ublock)
if nhs > (self.fmaxhet * ublock.shape[0]):
self.filters[self.iloc, 3] = 1
# return True
# return False
def get_snpsarrs(self, block, exclude_ref=False):
"count nsnps with option to exclude reference sample from count"
snpsarr = np.zeros((block.shape[1], 2), dtype=np.bool_)
return snpcount_numba(block, snpsarr, int(bool(exclude_ref)))
##############################################################
class Edges:
"Trims edges of overhanging sequences, cutsites, and pair inserts"
def __init__(self, data, seqs):
self.data = data
self.seqs = seqs
# params
self.bad = False
self.exclude_ref = self.data.hackersonly.exclude_reference
self.edges = np.array([0, 0, 0, self.seqs.shape[1]])
self.trims = np.array([0, 0, 0, 0]) # self.seqs.shape[1]])
self.minlen = self.data.params.filter_min_trim_len
# to be filled
self.trimseq = None
def get_edges(self):
# -1 to site coverage if ref is excluded from the count
minsites_left = self.data.hackersonly.trim_loci_min_sites
minsites_right = self.data.hackersonly.trim_loci_min_sites
if "reference" in self.data.params.assembly_method:
if self.exclude_ref:
minsites_left -= 1
minsites_right -= 1
# get .edges of good locus or .bad
self.trim_for_coverage(
minsite_left=minsites_left,
minsite_right=minsites_right,
)
# fill trimseq with the trimmed sequence array
self.trimseq = self.seqs[:, self.edges[0]:self.edges[3]]
# apply edge filtering to locus
try:
if not self.bad:
self.trim_overhangs()
self.trim_param_trim_loci()
except Exception: # TypeError
self.bad = True
# TODO: logger here for errors
# check that locus has anything left
self.trim_check()
def trim_for_coverage(self, minsite_left=4, minsite_right=4):
"trim edges to where data is not N or -"
# what is the limit of site coverage for trimming?
minsamp_left = min(minsite_left, self.seqs.shape[0])
minsamp_right = min(minsite_right, self.seqs.shape[0])
# how much cov is there at each site?
mincovs = np.sum((self.seqs != 78) & (self.seqs != 45), axis=0)
# locus left trim
self.edges[0] = locus_left_trim(self.seqs, minsamp_left, mincovs)
self.edges[3] = locus_right_trim(self.seqs, minsamp_right, mincovs)
if self.edges[3] <= self.edges[0]:
self.bad = True
# find insert region for paired data to mask it...
self.edges[1] = 0
self.edges[2] = 0
def trim_overhangs(self):
"fuzzy match to trim the restriction_overhangs from r1 and r2"
# trim left side for overhang
for cutter in self.data.params.restriction_overhang:
# skip if None
if not cutter:
continue
# will be ints for py2/3
cutter = np.array(list(bytes(cutter.encode())))
# compare match over cut size skipping Ns and allow .25 diffs
slx = slice(0, cutter.shape[0])
matching = self.trimseq[:, slx] == cutter
mask = np.where(
(self.trimseq[:, slx] != 78) & (self.trimseq[:, slx] != 45))
matchset = matching[mask]
if float(matchset.sum()) / matchset.size >= 0.75:
self.trims[0] = len(cutter)
# trim right side for overhang
if self.data.params.restriction_overhang[1]:
# revcomp the cutter (string not array)
# cutter = np.array(list(bcomp(cutter.encode())[::-1]))
slx = slice(
self.trimseq.shape[1] - cutter.shape[0], self.trimseq.shape[1])
matching = self.trimseq[:, slx] == cutter
mask = np.where(
(self.trimseq[:, slx] != 78) & (self.trimseq[:, slx] != 45))
matchset = matching[mask]
if float(matchset.sum()) / matchset.size >= 0.75:
self.trims[3] = len(cutter)
def trim_param_trim_loci(self):
"user entered hard trims"
self.trims[0] = max([self.trims[0], self.data.params.trim_loci[0]])
self.trims[1] = (self.trims[1] - self.data.params.trim_loci[1]
if self.trims[1] else 0)
self.trims[2] = (self.trims[2] + self.data.params.trim_loci[2]
if self.trims[2] else 0)
self.trims[3] = max([self.trims[3], self.data.params.trim_loci[3]])
def trim_check(self):
self.edges[0] += self.trims[0]
self.edges[1] -= self.trims[1]
self.edges[2] += self.trims[2]
self.edges[3] -= self.trims[3]
# checks
if any(self.edges < 0):
self.bad = True
if self.edges[3] <= self.edges[0]:
self.bad = True
if self.edges[1] > self.edges[2]:
self.bad = True
# check total length including insert
if (self.edges[3] - self.edges[0]) < self.minlen:
self.bad = True
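# Worked example (illustrative numbers): coverage trimming gives
# edges = [3, 0, 0, 96] and cut-site/param trimming gives trims = [5, 0, 0, 4];
# trim_check then yields edges = [8, 0, 0, 92], and the locus is kept as long
# as no edge went negative, 92 > 8, and 92 - 8 >= filter_min_trim_len.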
@njit
def locus_left_trim(seqs, minsamp, mincovs):
leftmost = np.where(mincovs >= minsamp)[0]
if leftmost.size:
return leftmost.min()
return 0
@njit
def locus_right_trim(seqs, minsamp, mincovs):
rightmost = np.where(mincovs >= minsamp)[0]
if rightmost.size:
return rightmost.max() + 1
return 0
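# Hedged usage sketch for the trim helpers above (illustrative values, not
# called anywhere in the pipeline): sites are kept from the first to one past
# the last column whose coverage reaches minsamp.
def _example_locus_trim():
    "Illustrative only: how mincovs and minsamp set the kept window."
    seqs = np.zeros((4, 6), dtype=np.uint8)       # contents unused by the helpers
    mincovs = np.array([1, 2, 4, 5, 4, 1])        # per-site sample coverage
    left = locus_left_trim(seqs, 4, mincovs)      # -> 2
    right = locus_right_trim(seqs, 4, mincovs)    # -> 5
    return seqs[:, left:right]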
###############################################################
def convert_outputs(data, oformat):
try:
Converter(data).run(oformat)
except Exception as inst:
# Allow one file to fail without breaking all step 7
raise IPyradError("Error creating outfile: {}\n{}\t{}".format(
OUT_SUFFIX[oformat],
type(inst).__name__,
inst))
###############################################################
class Converter:
"functions for converting hdf5 arrays into output files"
def __init__(self, data):
self.data = data
self.output_formats = self.data.params.output_formats
self.seqs_database = self.data.seqs_database
self.snps_database = self.data.snps_database
self.exclude_ref = (
self.data.hackersonly.exclude_reference and self.data.isref)
def run(self, oformat):
# phy array outputs
if oformat == "p":
self.write_phy()
# phy array + phymap outputs
if oformat == "n":
self.write_nex()
if oformat == "G":
self.write_gphocs()
# phy array + phymap + populations outputs
if oformat == "m":
pass
# snps array + snpsmap outputs
if oformat == "s":
self.write_snps()
self.write_snps_map()
# recommended to use analysis tools for unlinked sampling.
if oformat == "u":
self.write_usnps()
if oformat == "k":
self.write_str()
if oformat == "g":
self.write_geno()
if oformat == "t":
self.write_treemix()
def write_phy(self):
# write from hdf5 array
with open(self.data.outfiles.phy, 'w') as out:
with h5py.File(self.seqs_database, 'r') as io5:
# load seqarray
seqarr = io5['phy']
arrsize = io5['phymap'][-1, 2]
# if reference then inserts are not trimmed from phy
#
# write dims
if self.exclude_ref:
out.write("{} {}\n".format(len(self.data.snames) - 1, arrsize))
rowstart = 1
else:
out.write("{} {}\n".format(len(self.data.snames), arrsize))
rowstart = 0
# write to disk
for idx in range(rowstart, io5['phy'].shape[0]):
seq = seqarr[idx, :arrsize].view("S1")
out.write(
"{}{}".format(
self.data.pnames[self.data.snames[idx]],
b"".join(seq).decode().upper() + "\n",
)
)
def write_nex(self):
# write from hdf5 array
with open(self.data.outfiles.nex, 'w') as out:
with h5py.File(self.seqs_database, 'r') as io5:
# load seqarray (this could be chunked, this can be >50Gb)
seqarr = io5['phy'][:]
arrsize = io5['phymap'][-1, 2]
# option: exclude reference sequence
if self.exclude_ref:
# write nexus seq header
out.write(NEXHEADER.format(seqarr.shape[0] - 1, arrsize))
rstart = 1
else:
# write nexus seq header
out.write(NEXHEADER.format(seqarr.shape[0], arrsize))
rstart = 0
# get the name order for every block
xnames = [
self.data.pnames[self.data.snames[i]]
for i in range(rstart, len(self.data.snames))
]
# grab a big block of data
chunksize = 100000 # this should be a multiple of 100
for bidx in range(0, arrsize, chunksize):
bigblock = seqarr[rstart:, bidx:bidx + chunksize]
lend = int(arrsize - bidx)
# store interleaved seqs 100 chars with longname+2 before
tmpout = []
for block in range(0, min(chunksize, lend), 100):
stop = min(block + 100, arrsize)
for idx, name in enumerate(xnames):
# py2/3 compat --> b"TGCGGG..."
seqdat = bytes(bigblock[idx, block:stop])
tmpout.append(" {}{}\n".format(
name,
seqdat.decode().upper()))
tmpout.append("\n")
# TODO, double check end of matrix...
## print intermediate result and clear
if any(tmpout):
out.write("".join(tmpout))
# closer
out.write(NEXCLOSER)
# add partition information from maparr
maparr = io5["phymap"][:, 2]
charsetblock = []
charsetblock.append("BEGIN SETS;")
# the first block
charsetblock.append("charset {} = {}-{};".format(
0, 1, maparr[0],
))
# all other blocks
# nexus is 1-indexed. maparr is dtype np.uint64, so adding
# a standard int results in np.float64, so cast to uint64 first
for idx in range(0, maparr.shape[0] - 1):
charsetblock.append("charset {} = {}-{};".format(
idx + 1, maparr[idx] + np.uint64(1) , maparr[idx + 1]
)
)
charsetblock.append("END;")
out.write("\n".join(charsetblock))
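# Worked example (illustrative): if maparr holds locus end columns
# [120, 250, 400], the charset block lists "charset 0 = 1-120;",
# "charset 1 = 121-250;" and "charset 2 = 251-400;" -- 1-indexed,
# inclusive ranges as nexus expects.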
def write_snps(self):
# write from hdf5 array
with open(self.data.outfiles.snps, 'w') as out:
with h5py.File(self.snps_database, 'r') as io5:
# load seqarray
seqarr = io5['snps']
# option to skip ref
if self.exclude_ref:
nsamples = len(self.data.snames) - 1
rstart = 1
else:
nsamples = len(self.data.snames)
rstart = 0
# write dims
out.write("{} {}\n".format(nsamples, seqarr.shape[1]))
# write to disk one row at a time
# (todo: chunk optimize for this.)
for idx in range(rstart, io5['snps'].shape[0]):
seq = seqarr[idx, :].view("S1")
out.write(
"{}{}".format(
self.data.pnames[self.data.snames[idx]],
b"".join(seq).decode().upper() + "\n",
)
)
def write_usnps(self):
with open(self.data.outfiles.usnps, 'w') as out:
with h5py.File(self.snps_database, 'r') as io5:
# load seqarray
snparr = io5['snps'][:]
# snp positions
maparr = io5["snpsmap"][:, :2]
maparr[:, 1] = range(maparr.shape[0])
# get n unlinked snps
subs = subsample(maparr)
nsnps = subs.size
# option to skip ref
if self.exclude_ref:
nsamples = len(self.data.snames) - 1
rstart = 1
else:
nsamples = len(self.data.snames)
rstart = 0
# write dims
out.write("{} {}\n".format(nsamples, nsnps))
# write to disk one row at a time
for idx in range(rstart, snparr.shape[0]):
# get all SNPS from this sample
seq = snparr[idx, subs].view("S1")
out.write(
"{}{}".format(
self.data.pnames[self.data.snames[idx]],
b"".join(seq).decode().upper() + "\n",
)
)
## Write the other unlinked formats
self.write_ustr(snparr[:, subs])
genos = io5['genos'][:]
self.write_ugeno(genos[subs, :])
def write_ustr(self, snparr):
with open(self.data.outfiles.ustr, 'w') as out:
# option to skip ref
if self.exclude_ref:
rstart = 1
else:
rstart = 0
for idx in range(rstart, snparr.shape[0]):
# get all SNPS from this sample
seq = snparr[idx, :].view("S1")
# get sample name
name = self.data.pnames[self.data.snames[idx]]
# get row of data
snps = snparr[idx, :].view("S1")
# expand for ambiguous bases
snps = [BTS[i.upper()] for i in snps]
# convert to numbers and write row for each resolution
sequence = "\t".join([STRDICT[i[0]] for i in snps])
out.write(
"{}\t\t\t\t\t{}\n"
.format(name, sequence))
## Write out the second allele if it exists
if self.data.params.max_alleles_consens > 1:
sequence = "\t".join([STRDICT[i[1]] for i in snps])
out.write(
"{}\t\t\t\t\t{}\n"
.format(name, sequence))
def write_ugeno(self, genos):
with open(self.data.outfiles.ugeno, 'w') as out:
# option to skip ref
if self.exclude_ref:
rstart = 1
else:
rstart = 0
genos = genos[:, rstart:]
snpgenos = np.zeros(genos.shape[:2], dtype=np.uint8)
snpgenos.fill(9)
# fill (0, 0)
snpgenos[np.all(genos == 0, axis=2)] = 2
# fill (0, 1) and (1, 0)
snpgenos[np.sum(genos, axis=2) == 1] = 1
# fill (1, 1)
snpgenos[np.all(genos == 1, axis=2)] = 0
# write to file
np.savetxt(out, snpgenos, delimiter="", fmt="%d")
def write_snps_map(self):
"write a map file with linkage information for SNPs file"
counter = 1
with open(self.data.outfiles.snpsmap, 'w') as out:
with h5py.File(self.data.snps_database, 'r') as io5:
# access array of data
maparr = io5["snpsmap"]
## write to map file in chunks of 10000
for start in range(0, maparr.shape[0], 10000):
outchunk = []
# grab chunk
rdat = maparr[start:start + 10000, :]
# get chroms
if self.data.isref:
revdict = chroms2ints(self.data, 1)
for i in rdat:
outchunk.append(
"{}\t{}:{}\t{}\t{}\n"
.format(
i[0],
# 1-index to 0-index fix (1/6/19)
revdict[i[3] - 1], i[4],
i[2] + 1,
counter,
#i[4],
)
)
counter += 1
else:
# convert to text for writing
for i in rdat:
outchunk.append(
"{}\tloc{}_snp{}_pos{}\t{}\t{}\n"
.format(
i[0],
i[0] - 1, i[4] - 1, i[2],
i[2] + 1,
counter,
#i[4],
)
)
counter += 1
# write chunk to file
out.write("".join(outchunk))
outchunk = []
def write_str(self):
# write data from snps database, resolve ambiguous bases and numeric.
with open(self.data.outfiles.str, 'w') as out:
with h5py.File(self.data.snps_database, 'r') as io5:
snparr = io5["snps"]
# option to skip ref
if self.exclude_ref:
rstart = 1
else:
rstart = 0
for idx in range(rstart, len(self.data.snames)):
# get sample name
name = self.data.pnames[self.data.snames[idx]]
# get row of data
snps = snparr[idx, :].view("S1")
# expand for ambiguous bases
snps = [BTS[i.upper()] for i in snps]
# convert to numbers and write row for each resolution
sequence = "\t".join([STRDICT[i[0]] for i in snps])
out.write(
"{}\t\t\t\t\t{}\n"
.format(name, sequence))
## Write out the second allele if it exists
if self.data.params.max_alleles_consens > 1:
sequence = "\t".join([STRDICT[i[1]] for i in snps])
out.write(
"{}\t\t\t\t\t{}\n"
.format(name, sequence))
def write_gphocs(self):
"b/c it is similar to .loci we just parse .loci and modify it."
with open(self.data.outfiles.gphocs, 'w') as out:
indat = iter(open(self.data.outfiles.loci, 'r'))
# write nloci header
out.write("{}\n".format(
self.data.stats_dfs.s7_loci["sum_coverage"].max()))
# read in each locus at a time
idx = 0
loci = []
locus = []
while 1:
try:
line = next(indat)
except StopIteration:
indat.close()
break
# end of locus
if line.endswith("|\n"):
# write stats and locus to string and store
nsamp = len(locus)
slen = len(locus[0].split()[-1])
locstr = ["locus{} {} {}\n".format(idx, nsamp, slen)]
loci.append("".join(locstr + locus))
# reset locus
idx += 1
locus = []
else:
locus.append(line)
if not idx % 10000:
out.write("\n".join(loci))
loci = []
# write to file
if loci:
out.write("\n".join(loci))
def write_geno(self):
with open(self.data.outfiles.geno, 'w') as out:
with h5py.File(self.data.snps_database, 'r') as io5:
# option to skip ref
if self.exclude_ref:
rstart = 1
else:
rstart = 0
genos = io5["genos"][:, rstart:]
snpgenos = np.zeros(genos.shape[:2], dtype=np.uint8)
snpgenos.fill(9)
# fill (0, 0)
snpgenos[np.all(genos == 0, axis=2)] = 2
# fill (0, 1) and (1, 0)
snpgenos[np.sum(genos, axis=2) == 1] = 1
# fill (1, 1)
snpgenos[np.all(genos == 1, axis=2)] = 0
# write to file
np.savetxt(out, snpgenos, delimiter="", fmt="%d")
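# Worked example (illustrative): a row of genotype calls
# [(0, 0), (0, 1), (1, 1), (9, 9)] is written as the line "2109";
# 2 = homozygous reference, 1 = heterozygous, 0 = homozygous alternate,
# 9 = missing or any other call.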
def write_treemix(self):
# We pass in 'binary="ls"' here to trick the constructor into not
# raising an error if treemix isn't installed. HAX!
import ipyrad.analysis as ipa
tmx = ipa.treemix(
data=self.data.snps_database,
name=self.data.name,
workdir=self.data.dirs.outfiles,
imap={i: j[1] for (i, j) in self.data.populations.items()},
minmap={i: j[0] for (i, j) in self.data.populations.items()},
binary="ls",
)
tmx.write_treemix_file()
def write_migrate(self):
import ipyrad.analysis as ipa
mig = ipa.migrate_n(
data=self.data.outfiles.loci,
name=self.data.name,
workdir=self.data.dirs.outfiles,
imap={i: j[1] for (i, j) in self.data.populations.items()},
minmap={i: j[0] for (i, j) in self.data.populations.items()},
)
mig.write_seqfile()
# ------------------------------------------------------------
# funcs parallelized on remote engines
# -------------------------------------------------------------
def write_loci_and_alleles(data):
# get faidict to convert chroms to ints
if data.isref:
faidict = chroms2ints(data, True)
# write alleles file
allel = 'a' in data.params.output_formats
# gather all loci bits
locibits = glob.glob(os.path.join(data.tmpdir, "*.loci"))
sortbits = sorted(locibits,
key=lambda x: int(x.rsplit("-", 1)[-1][:-5]))
# what is the length of the name padding?
with open(sortbits[0], 'r') as test:
pad = np.where(np.array(list(test.readline())) == " ")[0].max()
# write to file while adding counters to the ordered loci
outloci = open(data.outfiles.loci, 'w')
if allel:
outalleles = open(data.outfiles.alleles, 'w')
idx = 0
for bit in sortbits:
# store until writing
lchunk = []
achunk = []
# LOCI ONLY: iterate through chunk files
if not allel:
indata = open(bit, 'r')
for line in iter(indata):
# skip reference lines if excluding
if data.hackersonly.exclude_reference:
if "reference " in line:
continue
# write name, seq pairs
if "|\n" not in line:
lchunk.append(line[:pad] + line[pad:].upper())
# write snpstring and info
else:
snpstring, nidxs = line.rsplit("|", 2)[:2]
if data.params.assembly_method == 'reference':
refpos = nidxs.split(",")[0]
# translate refpos chrom idx (1-indexed) to chrom name
cid, rid = refpos.split(":")
cid = faidict[int(cid) - 1]
lchunk.append(
"{}|{}:{}:{}|\n".format(snpstring, idx, cid, rid))
else:
lchunk.append(
"{}|{}|\n".format(snpstring, idx))
idx += 1
# close bit handle
indata.close()
# ALLELES: iterate through chunk files to write LOCI AND ALLELES
else:
indata = open(bit, 'r')
for line in iter(indata):
# skip reference lines if excluding
if data.hackersonly.exclude_reference:
if "reference " in line:
continue
if "|\n" not in line:
name = line[:pad]
seq = line[pad:]
lchunk.append(name + seq.upper())
all1, all2 = splitalleles(seq)
aname, spacer = name.split(" ", 1)
# adjust seqnames for proper buffering of the snpstring
achunk.append(aname + "_0 " + spacer[2:] + all1)
achunk.append(aname + "_1 " + spacer[2:] + all2)
else:
snpstring, nidxs = line.rsplit("|", 2)[:2]
# adjust length of snpstring so it lines up for refseq
asnpstring = "//" + snpstring[2:]
if data.params.assembly_method == 'reference':
refpos = nidxs.split(",")[0]
# translate refpos chrom idx (1-indexed) to chrom name
cid, rid = refpos.split(":")
cid = faidict[int(cid) - 1]
lchunk.append(
"{}|{}:{}:{}|\n".format(snpstring, idx, cid, rid))
achunk.append(
"{}|{}:{}:{}|\n".format(asnpstring, idx, cid, rid))
else:
lchunk.append(
"{}|{}|\n".format(line.rsplit("|", 2)[0], idx))
achunk.append(
"{}|{}|\n".format(line.rsplit("|", 2)[0], idx))
idx += 1
indata.close()
outalleles.write("".join(achunk))
outloci.write("".join(lchunk))
outloci.close()
if allel:
outalleles.close()
def pseudoref2ref(pseudoref, ref):
"""
Reorder pseudoref (observed bases at SNP sites) to have the ref allele
listed first. On rare occasions the ref allele is 'N'; in that case the
row is left in its original order.
"""
# create new empty array
npseudo = np.zeros(pseudoref.shape, dtype=np.uint8)
# at all sites where pseudo 0 matches reference, leave it
matched = np.where(pseudoref[:, 0] == ref)[0]
npseudo[matched] = pseudoref[matched, :]
# at other sites, shift order so ref is first
notmatched = np.where(pseudoref[:, 0] != ref)[0]
for row in notmatched:
dat = list(pseudoref[row])
# skips if ref allele is missing (N)
try:
# pop ref and insert it
new = dat.pop(dat.index(ref[row]))
dat.insert(0, new)
npseudo[row] = dat
except ValueError:
npseudo[row] = pseudoref[row]
return npseudo
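# Hedged worked example for pseudoref2ref (illustrative values, not called by
# the pipeline): a row [71, 67, 0, 0] records the observed bases G and C at a
# site; if the reference base is C (67) the row is reordered so the ref allele
# comes first, and if the reference base is absent (e.g. N) the row is kept
# unchanged.
def _example_pseudoref2ref():
    "Illustrative only: reorder one pseudoref row so the ref base is first."
    pseudo = np.array([[71, 67, 0, 0]], dtype=np.uint8)   # observed bases G, C
    ref = np.array([67], dtype=np.uint8)                  # reference base C
    return pseudoref2ref(pseudo, ref)                     # -> [[67, 71, 0, 0]]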
def fill_seq_array(data, ntaxa, nbases, nloci):
# init/reset hdf5 database
with h5py.File(data.seqs_database, 'w') as io5:
# temporary array data sets
phy = io5.create_dataset(
name="phy",
shape=(ntaxa, nbases),
dtype=np.uint8,
)
# temporary array data sets
phymap = io5.create_dataset(
name="phymap",
shape=(nloci, 5),
dtype=np.uint64,
)
# store attrs of the reference genome to the phymap
if data.params.assembly_method == 'reference':
io5["scaffold_lengths"] = get_fai_values(data, "length")
io5["scaffold_names"] = get_fai_values(data, "scaffold").astype("S")
phymap.attrs["reference"] = data.params.reference_sequence
else:
phymap.attrs["reference"] = "pseudoref"
# store names and
phymap.attrs["phynames"] = [i.encode() for i in data.pnames]
phymap.attrs["columns"] = [
b"chroms", b"phy0", b"phy1", b"pos0", b"pos1",
]
# gather all loci bits
locibits = glob.glob(os.path.join(data.tmpdir, "*.loci"))
sortbits = sorted(locibits,
key=lambda x: int(x.rsplit("-", 1)[-1][:-5]))
# name order for entry in array
snames = data.snames
sidxs = {sample: i for (i, sample) in enumerate(snames)}
# iterate through file
gstart = 0
start = end = 0
maxsize = 100000
tmploc = {}
mapends = []
mapchroms = []
mappos0 = []
mappos1 = []
mapstart = mapend = 0
locidx = 0
# array to store until writing; TODO: Accommodate large files...
tmparr = np.zeros((ntaxa, maxsize + 50000), dtype=np.uint8)
# iterate over chunkfiles
for bit in sortbits:
# iterate lines of file until locus endings
indata = open(bit, 'r')
for line in iter(indata):
# still filling locus until |\n
if "|\n" not in line:
# if empty skip
try:
name, seq = line.split()
tmploc[name] = seq
except ValueError:
continue
# locus is full, dump it
else:
# convert seqs to an array
locidx += 1
# parse chrom:pos-pos
if data.isref:
lineend = line.split("|")[1]
chrom = int(lineend.split(":")[0])
pos0, pos1 = 0, 0
pos0, pos1 = (
int(i) for i in lineend
.split(":")[1]
.split(",")[0]
.split("-")
)
# seq ordered into array by snames as int8 (py2/3 checked)
loc = np.array([
list(bytes(tmploc[i].encode())) for i in snames
if i in tmploc
]).astype(np.int8)
# loc = (np.array([list(i) for i in tmploc.values()])
# .astype(bytes).view(np.uint8))
# TODO: check code here for reference excluded...
# drop sites that are all N or - (e.g., pair inserts)
if (data.isref and data.ispair):
mask = np.all(loc[1:, :] == 78, axis=0)
else:
mask = np.all((loc == 45) | (loc == 78), axis=0)
loc = loc[:, np.invert(mask)]
# store end position of locus for map
end = start + loc.shape[1]
# checked for py2/3 (keeping name order straight important)
lidx = 0
for name in snames:
if name in tmploc:
sidx = sidxs[name]
tmparr[sidx, start:end] = loc[lidx]
lidx += 1
# tnames = sorted(tmploc.keys())
# for idx, name in enumerate(snames):
# if name in tmploc
# sidx = sidxs[name]
# tmparr[sidx, start:end] = loc[idx]
# for idx, name in enumerate(tmploc):
# tmparr[sidxs[name], start:end] = loc[idx]
mapends.append(gstart + end)
if data.isref:
mapchroms.append(chrom)
mappos0.append(pos0)
mappos1.append(pos1)
else:
mapchroms.append(locidx - 1)
# reset locus
start = end
tmploc = {}
# dump tmparr when it gets large
if end > maxsize:
# trim right overflow from tmparr (end filled as 0s)
trim = np.where(tmparr != 0)[1]
if trim.size:
trim = trim.max() + 1
else:
trim = tmparr.shape[1]
# fill missing with 78 (N)
tmparr[tmparr == 0] = 78
# dump tmparr to hdf5
phy[:, gstart:gstart + trim] = tmparr[:, :trim]
phymap[mapstart:locidx, 0] = mapchroms
phymap[mapstart:locidx, 2] = mapends
if data.isref:
phymap[mapstart:locidx, 3] = mappos0
phymap[mapstart:locidx, 4] = mappos1
mapstart = locidx
mapends = []
mapchroms = []
mappos0 = []
mappos1 = []
# reset
tmparr = np.zeros((ntaxa, maxsize + 50000), dtype=np.uint8)
gstart += trim
start = end = 0
# close bit handle
indata.close()
if start == 0 and end == 0:
# The last chunk fell exactly on the maxsize boundary so it has
# already been trimmed and dumped to the phy. In this case the for
# loop on the line iterator has fallen through (no more data) so
# there is no final chunk to trim.
pass
else:
# trim final chunk tmparr to size
trim = np.where(tmparr != 0)[1]
if trim.size:
trim = trim.max() + 1
else:
trim = tmparr.shape[1]
# fill missing with 78 (N)
tmparr[tmparr == 0] = 78
# dump tmparr and maplist to hdf5. Because we dropped sites that are
# all N or - the length of phy data can be less than nbases and so
# there can be 000 at the end of phy. This is ok, we trim it when
# writing phylip file, but good to be aware it's there for other things
phy[:, gstart:gstart + trim] = tmparr[:, :trim]
phymap[mapstart:locidx, 0] = mapchroms
phymap[mapstart:locidx, 2] = mapends
if data.isref:
phymap[mapstart:locidx, 3] = mappos0
phymap[mapstart:locidx, 4] = mappos1
phymap[1:, 1] = phymap[:-1, 2]
# fill 'scaffold' information for denovo data sets from the data
if "reference" not in data.params.assembly_method:
# 1-index the "chromosomes" and store lengths as loclens
phymap[:, 0] += 1 # <- does not cause problems for outfiles...
io5["scaffold_names"] = (io5["phymap"][:, 0]).astype("S")
io5["scaffold_lengths"] = io5["phymap"][:, 2] - io5["phymap"][:, 1]
# write stats to the output file
with open(data.stats_files.s7, 'a') as outstats:
trim = phymap[-1, 2] # locidx - 1]
missmask = phy[:trim] == 78
missmask += phy[:trim] == 45
missing = 100 * (missmask.sum() / float(phy[:trim].size))
print("sequence matrix size: ({}, {}), {:.2f}% missing sites."
.format(
len(snames),
trim,
max(0, missing),
),
file=outstats,
)
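# Worked example (illustrative): if three loci end at phy columns
# [120, 250, 300], phymap column 2 holds [120, 250, 300] and the
# phymap[1:, 1] = phymap[:-1, 2] line above fills column 1 with
# [0, 120, 250], so each row stores the half-open [phy0, phy1) span of
# its locus in the concatenated phy array.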
def fill_snp_array(data, ntaxa, nsnps):
# open new database file handle
with h5py.File(data.snps_database, 'w') as io5:
# Database files for storing arrays on disk.
# Should optimize for slicing by rows if we run into slow writing, or
# if it uses too much mem. For now let h5py do the auto-chunking.
io5.create_dataset(
name="snps",
shape=(ntaxa, nsnps),
dtype=np.uint8,
)
# store snp locations:
# (loc-counter, loc-snp-counter, loc-snp-pos, chrom, chrom-snp-pos)
io5.create_dataset(
name="snpsmap",
shape=(nsnps, 5),
dtype=np.uint32,
)
# store the observed bases (pseudo-reference) at each SNP
io5.create_dataset(
name="pseudoref",
shape=(nsnps, 4),
dtype=np.uint8,
)
# store genotype calls (0/0, 0/1, 0/2, etc.)
io5.create_dataset(
name="genos",
shape=(nsnps, ntaxa, 2),
dtype=np.uint8,
)
# store sample names and snpmap columns names as attributes
io5["snps"].attrs["names"] = [i.encode() for i in data.snames]
io5["snpsmap"].attrs["columns"] = [
b"locus", b"locidx", b"locpos", b"scaf", b"scafpos",
# b"arrpos",
]
# gather all loci bits
locibits = glob.glob(os.path.join(data.tmpdir, "*.loci"))
sortbits = sorted(
locibits,
key=lambda x: int(x.rsplit("-", 1)[-1][:-5])
)
# name order for entry in array
sidxs = {sample: i for (i, sample) in enumerate(data.snames)}
# iterate through file
start = end = 0
tmploc = {}
locidx = 1
snpidx = 1
# array to store until writing
tmparr = np.zeros((ntaxa, nsnps), dtype=np.uint8)
tmpmap = np.zeros((nsnps, 5), dtype=np.uint32)
# iterate over chunkfiles
for bit in sortbits:
# iterate lines of file until locus endings
indata = open(bit, 'r')
for line in iter(indata):
# while still filling locus until |\n store name,seq in dict
if "|\n" not in line:
try:
name, seq = line.split()
tmploc[name] = seq
except ValueError:
continue
# locus is full, dump it
else:
# convert seqs to an np.int8 array, checked py2/3
loc = np.array(
[list(bytes(tmploc[i].encode())) for i in data.snames
if i in tmploc]
).astype(np.int8)
# loc = np.array(
# [list(i) for i in tmploc.values()]
# ).astype(bytes).view(np.uint8)
snps, idxs, _ = line[len(data.snppad):].rsplit("|", 2)
snpsmask = np.array(list(snps)) != " "
snpsidx = np.where(snpsmask)[0]
# select only the SNP sites
snpsites = loc[:, snpsmask]
# store end position of locus for map
end = start + snpsites.shape[1]
# checked for py2/3 (keeping name order straight important)
lidx = 0
for name in data.snames:
if name in tmploc:
sidx = sidxs[name]
tmparr[sidx, start:end] = snpsites[lidx, :]
lidx += 1
# for idx, name in enumerate(tmploc):
# tmparr[sidxs[name], start:end] = snpsites[idx, :]
# store snpsmap data 1-indexed with chroms info
if data.isref:
chrom, pos = idxs.split(",")[0].split(":")
start = int(pos.split("-")[0])
#chromidx = faidict[chrom]
chromidx = int(chrom)
for isnp in range(snpsites.shape[1]):
isnpx = snpsidx[isnp]
tmpmap[snpidx - 1] = (
locidx, isnp, isnpx, chromidx, isnpx + start,
)
snpidx += 1
# store snpsmap data (snpidx is 1-indexed)
else:
for isnp in range(snpsites.shape[1]):
tmpmap[snpidx - 1] = (
locidx, isnp, snpsidx[isnp], 0, snpidx,
)
snpidx += 1
locidx += 1
# reset locus
start = end
tmploc = {}
# close file handle
indata.close()
# fill missing with 78 (N)
tmparr[tmparr == 0] = 78
# dump tmparr and maplist to hdf5
io5['snps'][:] = tmparr[:]
io5['snpsmap'][:] = tmpmap
del tmparr
# write stats output
with open(data.stats_files.s7, 'a') as outstats:
missmask = io5["snps"][:] == 78
missmask += io5["snps"][:] == 45
missing = 100 * (missmask.sum() / float(io5["snps"][:nsnps].size))
print(
"snps matrix size: ({}, {}), {:.2f}% missing sites."
.format(
len(data.snames),
nsnps,
missing,
),
file=outstats,
)
# fill in the reference and geno arrays
# convert snps to characters uppered to get most common as pseudoref
snparr = io5["snps"][:].view("S1")
snparr = np.char.upper(snparr).view(np.uint8)
# store pseudo-ref (most common base)
# with ambiguous bases resolved: (87, 78, 0, 0).
if data.params.assembly_method != 'reference':
io5['pseudoref'][:] = reftrick(snparr, GETCONS)
else:
ref = snparr[data.snames.index('reference')]
pseudoref = reftrick(snparr, GETCONS)
io5['pseudoref'][:] = pseudoref2ref(pseudoref, ref)
# fill for each taxon
for sidx in range(ntaxa):
resos = [DCONS[i] for i in snparr[sidx, :]]
# pseudoref version
io5['genos'][:, sidx, :] = get_genos(
np.array([i[0] for i in resos]),
np.array([i[1] for i in resos]),
io5['pseudoref'][:]
)
###############################################################
class VCF_filler:
"""
Incorporate indels and trim amounts when grabbing depths from CATG arrays
(depth arrays from step 5). Indels are only relevant to denovo data.
"""
def __init__(self, data, nsnps, sample):
# input locus bits
self.locbits = glob.glob(os.path.join(data.tmpdir, "chunk*.loci"))
self.locbits = sorted(
self.locbits, key=lambda x: int(x.rsplit("-", 1)[-1][:-5]))
self.loclines = None
# input arrays of indels arrays
self.indbits = glob.glob(os.path.join(data.tmpdir, "chunk*.indels*"))
if not self.indbits:
self.indbits = [None] * len(self.locbits)
# input trim arrays
self.trimbits = glob.glob(os.path.join(data.tmpdir, "chunk*.npy"))
self.trimbits = sorted(
self.trimbits, key=lambda x: int(x.rsplit("-", 1)[-1][:-4]))
# array to store vcfdepths for this taxon
self.vcfd = np.zeros((nsnps, 4), dtype=np.uint32)
# the sample for this comp
self.sname = sample.name
self.isref = bool(data.isref)
# snpsmap has locations of SNPs on trimmed loci, e.g.,
# no SNPs are on loc 1 and 2, first is on 3 at post-trim pos 11
# [ 3 0 11 1 41935]
# [ 4 0 57 1 56150]
with h5py.File(data.snps_database, 'r') as io5:
self.snpsmap = io5['snpsmap'][:, [0, 2]]
# TODO: scaffs should be ordered (right?) so no need to load it all!
# All catgs for this sample (this could be done more mem efficient...)
with h5py.File(sample.files.database, 'r') as io5:
self.catgs = io5['catg'][:]
self.maxlen = self.catgs.shape[1]
# Sample-level counters
self.locidx = 0
self.snpidx = 0
def run(self):
"loops over chunked files streaming through all loci for this sample"
for idx in range(len(self.locbits)):
self.localidx = 0
self.locfill(idx)
def locfill(self, idx):
"iterates over loci in chunkfile to get and enter catgs for snps"
# load the arrays for this bit
edges = np.load(self.trimbits[idx])
inds = self.indbits[idx]
if inds:
inds = np.load(inds)
# iterate over the chunk of trimmed loci
self.loclines = iter(open(self.locbits[idx], 'r'))
while 1:
# yield increments locidx by 1
try:
self.yield_loc()
except StopIteration:
break
# get snps for this locus (1-indexed locus idxs)
self.locsnps = self.snpsmap[self.snpsmap[:, 0] == self.locidx]
# get global trim for this locus (0-indexed edge arr)
self.gtrim = edges[self.localidx - 1]
# if SNPs and data for this sample enter catgs
if (self.locsnps.size) and (self.sname in self.names):
if self.isref:
self.ref_enter_catgs()
else:
self.denovo_enter_catgs()
else:
# advance SNP counter even though this sample wasn't in SNP
self.snpidx += self.locsnps.shape[0]
def ref_enter_catgs(self):
# map SNP position to pre-trim locus position
nidx = self.names.index(self.sname)
sidx = self.sidxs[nidx]
tups = [[int(j) for j in i.split(":")] for i in sidx.split("-")]
# SNP is in samples, so get and store catg data for locidx
# [0] post-trim chrom:start-end of locus
# [1:] how far ahead of start does this sample start
# FOR DEBUGGING
# seq = seqs[nidx]
# seqarr = np.array(list(seq))
# enter each SNP
for snp in self.locsnps[:, 1]:
# in case multiple consens were merged in step 6 of this sample
for tup in tups:
cidx, coffset = tup
pos = snp + (self.gtrim - coffset)
if (pos >= 0) & (pos < self.maxlen):
self.vcfd[self.snpidx] += self.catgs[cidx, pos]
self.snpidx += 1
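# Worked example (illustrative): a SNP at post-trim locus position 11, a
# global left trim of 3, and a consens read that starts 2 bases after the
# reference start gives pos = 11 + (3 - 2) = 12, so the depths are pulled
# from catgs[cidx, 12] provided 0 <= 12 < maxlen.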
def denovo_enter_catgs(self):
"""
Grab catg depths for each SNP position -- needs to take into account
the trim from the left end and the imputation of indels.
"""
nidx = self.names.index(self.sname)
sidx = self.sidxs[nidx]
tups = [[int(j) for j in i.split("-")] for i in sidx.split(":")]
# SNP is in samples, so get and store catg data for locidx
# [0] post-trim chrom:start-end of locus
# [1:] how far ahead of start does this sample start
# FOR DEBUGGING
seq = self.seqs[nidx]
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import babel.dates
import collections
from datetime import datetime, timedelta
from dateutil import parser
from dateutil import rrule
from dateutil.relativedelta import relativedelta
import logging
from operator import itemgetter
import pytz
import re
import time
import uuid
from odoo import api, fields, models
from odoo import tools
from odoo.tools.translate import _
from odoo.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, pycompat
from odoo.exceptions import UserError, ValidationError
_logger = logging.getLogger(__name__)
VIRTUALID_DATETIME_FORMAT = "%Y%m%d%H%M%S"
def calendar_id2real_id(calendar_id=None, with_date=False):
""" Convert a "virtual/recurring event id" (type string) into a real event id (type int).
E.g. virtual/recurring event id is 4-20091201100000, so it will return 4.
:param calendar_id: id of calendar
:param with_date: if a duration in hours is passed, the event start and stop dates derived from the virtual id are returned as well
:return: real event id
"""
if calendar_id and isinstance(calendar_id, pycompat.string_types):
res = [bit for bit in calendar_id.split('-') if bit]
if len(res) == 2:
real_id = res[0]
if with_date:
real_date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT, time.strptime(res[1], VIRTUALID_DATETIME_FORMAT))
start = datetime.strptime(real_date, DEFAULT_SERVER_DATETIME_FORMAT)
end = start + timedelta(hours=with_date)
return (int(real_id), real_date, end.strftime(DEFAULT_SERVER_DATETIME_FORMAT))
return int(real_id)
return calendar_id and int(calendar_id) or calendar_id
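# Hedged usage sketch (values mirror the docstring above):
#   >>> calendar_id2real_id('4-20091201100000')
#   4
#   >>> calendar_id2real_id('4-20091201100000', with_date=2)
#   (4, '2009-12-01 10:00:00', '2009-12-01 12:00:00')
#   >>> calendar_id2real_id(4)
#   4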
def get_real_ids(ids):
if isinstance(ids, (pycompat.string_types, pycompat.integer_types)):
return calendar_id2real_id(ids)
if isinstance(ids, (list, tuple)):
return [calendar_id2real_id(_id) for _id in ids]
def real_id2calendar_id(record_id, date):
return '%s-%s' % (record_id, date.strftime(VIRTUALID_DATETIME_FORMAT))
def is_calendar_id(record_id):
return len(str(record_id).split('-')) != 1
SORT_ALIASES = {
'start': 'sort_start',
'start_date': 'sort_start',
'start_datetime': 'sort_start',
}
def sort_remap(f):
return SORT_ALIASES.get(f, f)
class Contacts(models.Model):
_name = 'calendar.contacts'
user_id = fields.Many2one('res.users', 'Me', required=True, default=lambda self: self.env.user)
partner_id = fields.Many2one('res.partner', 'Employee', required=True)
active = fields.Boolean('Active', default=True)
_sql_constraints = [
('user_id_partner_id_unique', 'UNIQUE(user_id,partner_id)', 'A user cannot have the same contact twice.')
]
@api.model
def unlink_from_partner_id(self, partner_id):
return self.search([('partner_id', '=', partner_id)]).unlink()
class Attendee(models.Model):
""" Calendar Attendee Information """
_name = 'calendar.attendee'
_rec_name = 'common_name'
_description = 'Attendee information'
def _default_access_token(self):
return uuid.uuid4().hex
STATE_SELECTION = [
('needsAction', 'Needs Action'),
('tentative', 'Uncertain'),
('declined', 'Declined'),
('accepted', 'Accepted'),
]
state = fields.Selection(STATE_SELECTION, string='Status', readonly=True, default='needsAction',
help="Status of the attendee's participation")
common_name = fields.Char('Common name', compute='_compute_common_name', store=True)
partner_id = fields.Many2one('res.partner', 'Contact', readonly="True")
email = fields.Char('Email', help="Email of Invited Person")
availability = fields.Selection([('free', 'Free'), ('busy', 'Busy')], 'Free/Busy', readonly="True")
access_token = fields.Char('Invitation Token', default=_default_access_token)
event_id = fields.Many2one('calendar.event', 'Meeting linked', ondelete='cascade')
@api.depends('partner_id', 'partner_id.name', 'email')
def _compute_common_name(self):
for attendee in self:
attendee.common_name = attendee.partner_id.name or attendee.email
@api.onchange('partner_id')
def _onchange_partner_id(self):
""" Make entry on email and availability on change of partner_id field. """
self.email = self.partner_id.email
@api.model
def create(self, values):
if not values.get("email") and values.get("common_name"):
common_nameval = values.get("common_name").split(':')
email = [x for x in common_nameval if '@' in x] # TODO JEM : should be refactored
values['email'] = email and email[0] or ''
values['common_name'] = values.get("common_name")
return super(Attendee, self).create(values)
@api.multi
def copy(self, default=None):
raise UserError(_('You cannot duplicate a calendar attendee.'))
@api.multi
def _send_mail_to_attendees(self, template_xmlid, force_send=False):
""" Send mail for event invitation to event attendees.
:param template_xmlid: xml id of the email template to use to send the invitation
:param force_send: if set to True, the mail(s) will be sent immediately (instead of the next queue processing)
"""
res = False
if self.env['ir.config_parameter'].sudo().get_param('calendar.block_mail') or self._context.get("no_mail_to_attendees"):
return res
calendar_view = self.env.ref('calendar.view_calendar_event_calendar')
invitation_template = self.env.ref(template_xmlid)
# get ics file for all meetings
ics_files = self.mapped('event_id').get_ics_file()
# prepare rendering context for mail template
colors = {
'needsAction': 'grey',
'accepted': 'green',
'tentative': '#FFFF00',
'declined': 'red'
}
rendering_context = dict(self._context)
rendering_context.update({
'color': colors,
'action_id': self.env['ir.actions.act_window'].search([('view_id', '=', calendar_view.id)], limit=1).id,
'dbname': self._cr.dbname,
'base_url': self.env['ir.config_parameter'].sudo().get_param('web.base.url', default='http://localhost:8069')
})
invitation_template = invitation_template.with_context(rendering_context)
# send email with attachments
mails_to_send = self.env['mail.mail']
for attendee in self:
if attendee.email or attendee.partner_id.email:
# FIXME: is ics_file text or bytes?
ics_file = ics_files.get(attendee.event_id.id)
mail_id = invitation_template.send_mail(attendee.id)
vals = {}
if ics_file:
vals['attachment_ids'] = [(0, 0, {'name': 'invitation.ics',
'mimetype': 'text/calendar',
'datas_fname': 'invitation.ics',
'datas': base64.b64encode(ics_file)})]
vals['model'] = None # We don't want to have the mail in the chatter while in queue!
vals['res_id'] = False
current_mail = self.env['mail.mail'].browse(mail_id)
current_mail.mail_message_id.write(vals)
mails_to_send |= current_mail
if force_send and mails_to_send:
res = mails_to_send.send()
return res
@api.multi
def do_tentative(self):
""" Makes event invitation as Tentative. """
return self.write({'state': 'tentative'})
@api.multi
def do_accept(self):
""" Marks event invitation as Accepted. """
result = self.write({'state': 'accepted'})
for attendee in self:
attendee.event_id.message_post(body=_("%s has accepted invitation") % (attendee.common_name), subtype="calendar.subtype_invitation")
return result
@api.multi
def do_decline(self):
""" Marks event invitation as Declined. """
res = self.write({'state': 'declined'})
for attendee in self:
attendee.event_id.message_post(body=_("%s has declined invitation") % (attendee.common_name), subtype="calendar.subtype_invitation")
return res
class AlarmManager(models.AbstractModel):
_name = 'calendar.alarm_manager'
def get_next_potential_limit_alarm(self, alarm_type, seconds=None, partner_id=None):
result = {}
delta_request = """
SELECT
rel.calendar_event_id, max(alarm.duration_minutes) AS max_delta,min(alarm.duration_minutes) AS min_delta
FROM
calendar_alarm_calendar_event_rel AS rel
LEFT JOIN calendar_alarm AS alarm ON alarm.id = rel.calendar_alarm_id
WHERE alarm.type = %s
GROUP BY rel.calendar_event_id
"""
base_request = """
SELECT
cal.id,
cal.start - interval '1' minute * calcul_delta.max_delta AS first_alarm,
CASE
WHEN cal.recurrency THEN cal.final_date - interval '1' minute * calcul_delta.min_delta
ELSE cal.stop - interval '1' minute * calcul_delta.min_delta
END as last_alarm,
cal.start as first_event_date,
CASE
WHEN cal.recurrency THEN cal.final_date
ELSE cal.stop
END as last_event_date,
calcul_delta.min_delta,
calcul_delta.max_delta,
cal.rrule AS rule
FROM
calendar_event AS cal
RIGHT JOIN calcul_delta ON calcul_delta.calendar_event_id = cal.id
"""
filter_user = """
RIGHT JOIN calendar_event_res_partner_rel AS part_rel ON part_rel.calendar_event_id = cal.id
AND part_rel.res_partner_id = %s
"""
# Add filter on alarm type
tuple_params = (alarm_type,)
# Add filter on partner_id
if partner_id:
base_request += filter_user
tuple_params += (partner_id, )
# Upper bound on first_alarm of requested events
first_alarm_max_value = ""
if seconds is None:
# first alarm in the future + 3 minutes if there is one, now otherwise
first_alarm_max_value = """
COALESCE((SELECT MIN(cal.start - interval '1' minute * calcul_delta.max_delta)
FROM calendar_event cal
RIGHT JOIN calcul_delta ON calcul_delta.calendar_event_id = cal.id
WHERE cal.start - interval '1' minute * calcul_delta.max_delta > now() at time zone 'utc'
) + interval '3' minute, now() at time zone 'utc')"""
else:
# now + given seconds
first_alarm_max_value = "(now() at time zone 'utc' + interval '%s' second )"
tuple_params += (seconds,)
self._cr.execute("""
WITH calcul_delta AS (%s)
SELECT *
FROM ( %s WHERE cal.active = True ) AS ALL_EVENTS
WHERE ALL_EVENTS.first_alarm < %s
AND ALL_EVENTS.last_event_date > (now() at time zone 'utc')
""" % (delta_request, base_request, first_alarm_max_value), tuple_params)
for event_id, first_alarm, last_alarm, first_meeting, last_meeting, min_duration, max_duration, rule in self._cr.fetchall():
result[event_id] = {
'event_id': event_id,
'first_alarm': first_alarm,
'last_alarm': last_alarm,
'first_meeting': first_meeting,
'last_meeting': last_meeting,
'min_duration': min_duration,
'max_duration': max_duration,
'rrule': rule
}
return result
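# Worked example (illustrative): a non-recurring meeting starting at 10:00
# with a 30-minute and a 5-minute alarm of the requested type gives
# max_delta = 30 and min_delta = 5, so first_alarm = 09:30 and
# last_alarm = stop - 5 minutes; the event is returned only if first_alarm
# falls before the computed upper bound and the meeting is not already over.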
def do_check_alarm_for_one_date(self, one_date, event, event_maxdelta, in_the_next_X_seconds, alarm_type, after=False, missing=False):
""" Search for some alarms in the interval of time determined by some parameters (after, in_the_next_X_seconds, ...)
:param one_date: date of the event to check (not the same that in the event browse if recurrent)
:param event: Event browse record
:param event_maxdelta: biggest duration from alarms for this event
:param in_the_next_X_seconds: looking in the future (in seconds)
:param after: if not False: will return alert if after this date (date as string - todo: change in master)
:param missing: if not False: will return alert even if we are too late
:param alarm_type: the type of alarm to look for ('notification' or 'email')
"""
result = []
# TODO: remove event_maxdelta and if using it
if one_date - timedelta(minutes=(missing and 0 or event_maxdelta)) < datetime.now() + timedelta(seconds=in_the_next_X_seconds): # if an alarm is possible for this date
for alarm in event.alarm_ids:
if alarm.type == alarm_type and \
one_date - timedelta(minutes=(missing and 0 or alarm.duration_minutes)) < datetime.now() + timedelta(seconds=in_the_next_X_seconds) and \
(not after or one_date - timedelta(minutes=alarm.duration_minutes) > fields.Datetime.from_string(after)):
alert = {
'alarm_id': alarm.id,
'event_id': event.id,
'notify_at': one_date - timedelta(minutes=alarm.duration_minutes),
}
result.append(alert)
return result
@api.model
def get_next_mail(self):
now = fields.Datetime.now()
last_notif_mail = self.env['ir.config_parameter'].sudo().get_param('calendar.last_notif_mail', default=now)
try:
cron = self.env['ir.model.data'].sudo().get_object('calendar', 'ir_cron_scheduler_alarm')
except ValueError:
_logger.error("Cron for " + self._name + " can not be identified !")
return False
interval_to_second = {
"weeks": 7 * 24 * 60 * 60,
"days": 24 * 60 * 60,
"hours": 60 * 60,
"minutes": 60,
"seconds": 1
}
if cron.interval_type not in interval_to_second:
_logger.error("Cron delay can not be computed !")
return False
cron_interval = cron.interval_number * interval_to_second[cron.interval_type]
all_meetings = self.get_next_potential_limit_alarm('email', seconds=cron_interval)
for meeting in self.env['calendar.event'].browse(all_meetings):
max_delta = all_meetings[meeting.id]['max_duration']
if meeting.recurrency:
at_least_one = False
last_found = False
for one_date in meeting._get_recurrent_date_by_event():
in_date_format = one_date.replace(tzinfo=None)
last_found = self.do_check_alarm_for_one_date(in_date_format, meeting, max_delta, 0, 'email', after=last_notif_mail, missing=True)
for alert in last_found:
self.do_mail_reminder(alert)
at_least_one = True # if it's the first alarm for this recurrent event
if at_least_one and not last_found: # if the preceding occurrence had an alarm but this one does not, we can stop the search for this event
break
else:
in_date_format = datetime.strptime(meeting.start, DEFAULT_SERVER_DATETIME_FORMAT)
last_found = self.do_check_alarm_for_one_date(in_date_format, meeting, max_delta, 0, 'email', after=last_notif_mail, missing=True)
for alert in last_found:
self.do_mail_reminder(alert)
self.env['ir.config_parameter'].sudo().set_param('calendar.last_notif_mail', now)
@api.model
def get_next_notif(self):
partner = self.env.user.partner_id
all_notif = []
if not partner:
return []
all_meetings = self.get_next_potential_limit_alarm('notification', partner_id=partner.id)
time_limit = 3600 * 24 # return alarms of the next 24 hours
for event_id in all_meetings:
max_delta = all_meetings[event_id]['max_duration']
meeting = self.env['calendar.event'].browse(event_id)
if meeting.recurrency:
b_found = False
last_found = False
for one_date in meeting._get_recurrent_date_by_event():
in_date_format = one_date.replace(tzinfo=None)
last_found = self.do_check_alarm_for_one_date(in_date_format, meeting, max_delta, time_limit, 'notification', after=partner.calendar_last_notif_ack)
if last_found:
for alert in last_found:
all_notif.append(self.do_notif_reminder(alert))
if not b_found: # if it's the first alarm for this recurrent event
b_found = True
if b_found and not last_found: # if the preceding occurrence had an alarm but this one does not, we can stop the search for this event
break
else:
in_date_format = fields.Datetime.from_string(meeting.start)
last_found = self.do_check_alarm_for_one_date(in_date_format, meeting, max_delta, time_limit, 'notification', after=partner.calendar_last_notif_ack)
if last_found:
for alert in last_found:
all_notif.append(self.do_notif_reminder(alert))
return all_notif
def do_mail_reminder(self, alert):
meeting = self.env['calendar.event'].browse(alert['event_id'])
alarm = self.env['calendar.alarm'].browse(alert['alarm_id'])
result = False
if alarm.type == 'email':
result = meeting.attendee_ids._send_mail_to_attendees('calendar.calendar_template_meeting_reminder', force_send=True)
return result
def do_notif_reminder(self, alert):
alarm = self.env['calendar.alarm'].browse(alert['alarm_id'])
meeting = self.env['calendar.event'].browse(alert['event_id'])
if alarm.type == 'notification':
message = meeting.display_time
delta = alert['notify_at'] - datetime.now()
delta = delta.seconds + delta.days * 3600 * 24
return {
'event_id': meeting.id,
'title': meeting.name,
'message': message,
'timer': delta,
'notify_at': fields.Datetime.to_string(alert['notify_at']),
}
def notify_next_alarm(self, partner_ids):
""" Sends through the bus the next alarm of given partners """
notifications = []
users = self.env['res.users'].search([('partner_id', 'in', tuple(partner_ids))])
for user in users:
notif = self.sudo(user.id).get_next_notif()
notifications.append([(self._cr.dbname, 'calendar.alarm', user.partner_id.id), notif])
if len(notifications) > 0:
self.env['bus.bus'].sendmany(notifications)
class Alarm(models.Model):
_name = 'calendar.alarm'
_description = 'Event alarm'
@api.depends('interval', 'duration')
def _compute_duration_minutes(self):
for alarm in self:
if alarm.interval == "minutes":
alarm.duration_minutes = alarm.duration
elif alarm.interval == "hours":
alarm.duration_minutes = alarm.duration * 60
elif alarm.interval == "days":
alarm.duration_minutes = alarm.duration * 60 * 24
else:
alarm.duration_minutes = 0
_interval_selection = {'minutes': 'Minute(s)', 'hours': 'Hour(s)', 'days': 'Day(s)'}
name = fields.Char('Name', required=True)
type = fields.Selection([('notification', 'Notification'), ('email', 'Email')], 'Type', required=True, default='email')
duration = fields.Integer('Remind Before', required=True, default=1)
interval = fields.Selection(list(_interval_selection.items()), 'Unit', required=True, default='hours')
duration_minutes = fields.Integer('Duration in minutes', compute='_compute_duration_minutes', store=True, help="Duration in minutes")
@api.onchange('duration', 'interval')
def _onchange_duration_interval(self):
display_interval = self._interval_selection.get(self.interval, '')
self.name = str(self.duration) + ' ' + display_interval
def _update_cron(self):
try:
cron = self.env['ir.model.data'].sudo().get_object('calendar', 'ir_cron_scheduler_alarm')
except ValueError:
return False
return cron.toggle(model=self._name, domain=[('type', '=', 'email')])
@api.model
def create(self, values):
result = super(Alarm, self).create(values)
self._update_cron()
return result
@api.multi
def write(self, values):
result = super(Alarm, self).write(values)
self._update_cron()
return result
@api.multi
def unlink(self):
result = super(Alarm, self).unlink()
self._update_cron()
return result
class MeetingType(models.Model):
_name = 'calendar.event.type'
_description = 'Meeting Type'
name = fields.Char('Name', required=True)
_sql_constraints = [
('name_uniq', 'unique (name)', "Tag name already exists !"),
]
class Meeting(models.Model):
""" Model for Calendar Event
Special context keys :
- `no_mail_to_attendees` : disabled sending email to attendees when creating/editing a meeting
"""
_name = 'calendar.event'
_description = "Event"
_order = "id desc"
_inherit = ["mail.thread"]
@api.model
def default_get(self, fields):
# super default_model='crm.lead' for easier use in addons
if self.env.context.get('default_res_model') and not self.env.context.get('default_res_model_id'):
self = self.with_context(
default_res_model_id=self.env['ir.model'].sudo().search([
('model', '=', self.env.context['default_res_model'])
], limit=1).id
)
defaults = super(Meeting, self).default_get(fields)
# support active_model / active_id as replacement of default_* if not already given
if 'res_model_id' not in defaults and 'res_model_id' in fields and \
self.env.context.get('active_model') and self.env.context['active_model'] != 'calendar.event':
defaults['res_model_id'] = self.env['ir.model'].sudo().search([('model', '=', self.env.context['active_model'])], limit=1).id
if 'res_id' not in defaults and 'res_id' in fields and \
defaults.get('res_model_id') and self.env.context.get('active_id'):
defaults['res_id'] = self.env.context['active_id']
return defaults
@api.model
def _default_partners(self):
""" When active_model is res.partner, the current partners should be attendees """
partners = self.env.user.partner_id
active_id = self._context.get('active_id')
if self._context.get('active_model') == 'res.partner' and active_id:
if active_id not in partners.ids:
partners |= self.env['res.partner'].browse(active_id)
return partners
@api.multi
def _get_recurrent_dates_by_event(self):
""" Get recurrent start and stop dates based on Rule string"""
start_dates = self._get_recurrent_date_by_event(date_field='start')
stop_dates = self._get_recurrent_date_by_event(date_field='stop')
return list(pycompat.izip(start_dates, stop_dates))
@api.multi
def _get_recurrent_date_by_event(self, date_field='start'):
""" Get recurrent dates based on Rule string and all event where recurrent_id is child
date_field: the field containing the reference date information for recurrency computation
"""
self.ensure_one()
if date_field in self._fields and self._fields[date_field].type in ('date', 'datetime'):
reference_date = self[date_field]
else:
reference_date = self.start
def todate(date):
            val = parser.parse(''.join((re.compile(r'\d')).findall(date)))
## Dates are localized to saved timezone if any, else current timezone.
if not val.tzinfo:
val = pytz.UTC.localize(val)
return val.astimezone(timezone)
timezone = pytz.timezone(self._context.get('tz') or 'UTC')
event_date = pytz.UTC.localize(fields.Datetime.from_string(reference_date)) # Add "+hh:mm" timezone
if not event_date:
event_date = datetime.now()
if self.allday and self.rrule and 'UNTIL' in self.rrule and 'Z' not in self.rrule:
rset1 = rrule.rrulestr(str(self.rrule), dtstart=event_date.replace(tzinfo=None), forceset=True, ignoretz=True)
else:
# Convert the event date to saved timezone (or context tz) as it'll
# define the correct hour/day asked by the user to repeat for recurrence.
event_date = event_date.astimezone(timezone) # transform "+hh:mm" timezone
rset1 = rrule.rrulestr(str(self.rrule), dtstart=event_date, forceset=True, tzinfos={})
recurring_meetings = self.search([('recurrent_id', '=', self.id), '|', ('active', '=', False), ('active', '=', True)])
for meeting in recurring_meetings:
rset1._exdate.append(todate(meeting.recurrent_id_date))
return [d.astimezone(pytz.UTC) if d.tzinfo else d for d in rset1]
@api.multi
def _get_recurrency_end_date(self):
""" Return the last date a recurring event happens, according to its end_type. """
self.ensure_one()
data = self.read(['final_date', 'recurrency', 'rrule_type', 'count', 'end_type', 'stop', 'interval'])[0]
if not data.get('recurrency'):
return False
end_type = data.get('end_type')
final_date = data.get('final_date')
if end_type == 'count' and all(data.get(key) for key in ['count', 'rrule_type', 'stop', 'interval']):
count = (data['count'] + 1) * data['interval']
delay, mult = {
'daily': ('days', 1),
'weekly': ('days', 7),
'monthly': ('months', 1),
'yearly': ('years', 1),
}[data['rrule_type']]
deadline = fields.Datetime.from_string(data['stop'])
return deadline + relativedelta(**{delay: count * mult})
return final_date
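    # Worked example (illustrative, values are hypothetical): with end_type='count',
    # rrule_type='weekly', count=3, interval=2 and stop='2013-08-23 18:00:00',
    # the method computes count = (3 + 1) * 2 = 8 and returns
    # deadline + relativedelta(days=8 * 7), i.e. 2013-10-18 18:00:00.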
@api.multi
def _find_my_attendee(self):
""" Return the first attendee where the user connected has been invited
from all the meeting_ids in parameters.
"""
self.ensure_one()
for attendee in self.attendee_ids:
if self.env.user.partner_id == attendee.partner_id:
return attendee
return False
@api.model
def _get_date_formats(self):
""" get current date and time format, according to the context lang
:return: a tuple with (format date, format time)
"""
lang = self._context.get("lang")
lang_params = {}
if lang:
record_lang = self.env['res.lang'].search([("code", "=", lang)], limit=1)
lang_params = {
'date_format': record_lang.date_format,
'time_format': record_lang.time_format
}
# formats will be used for str{f,p}time() which do not support unicode in Python 2, coerce to str
format_date = pycompat.to_native(lang_params.get("date_format", '%B-%d-%Y'))
format_time = pycompat.to_native(lang_params.get("time_format", '%I-%M %p'))
return (format_date, format_time)
@api.model
def _get_recurrent_fields(self):
return ['byday', 'recurrency', 'final_date', 'rrule_type', 'month_by',
'interval', 'count', 'end_type', 'mo', 'tu', 'we', 'th', 'fr', 'sa',
'su', 'day', 'week_list']
@api.model
def _get_display_time(self, start, stop, zduration, zallday):
""" Return date and time (from to from) based on duration with timezone in string. Eg :
1) if user add duration for 2 hours, return : August-23-2013 at (04-30 To 06-30) (Europe/Brussels)
2) if event all day ,return : AllDay, July-31-2013
"""
timezone = self._context.get('tz')
if not timezone:
timezone = self.env.user.partner_id.tz or 'UTC'
timezone = tools.ustr(timezone).encode('utf-8') # make safe for str{p,f}time()
# get date/time format according to context
format_date, format_time = self.with_context(tz=timezone)._get_date_formats()
# convert date and time into user timezone
date = fields.Datetime.context_timestamp(self.with_context(tz=timezone), fields.Datetime.from_string(start))
date_deadline = fields.Datetime.context_timestamp(self.with_context(tz=timezone), fields.Datetime.from_string(stop))
# convert into string the date and time, using user formats
date_str = date.strftime(format_date)
time_str = date.strftime(format_time)
if zallday:
display_time = _("AllDay , %s") % (date_str)
elif zduration < 24:
duration = date + timedelta(hours=zduration)
display_time = _("%s at (%s To %s) (%s)") % (date_str, time_str, duration.strftime(format_time), timezone)
else:
display_time = _("%s at %s To\n %s at %s (%s)") % (date_str, time_str, date_deadline.strftime(format_date), date_deadline.strftime(format_time), timezone)
return display_time
def _get_duration(self, start, stop):
""" Get the duration value between the 2 given dates. """
if start and stop:
diff = fields.Datetime.from_string(stop) - fields.Datetime.from_string(start)
if diff:
duration = float(diff.days) * 24 + (float(diff.seconds) / 3600)
return round(duration, 2)
return 0.0
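    # Illustrative example (not in the original code), assuming server-format datetimes:
    #   _get_duration('2013-08-23 08:00:00', '2013-08-23 10:30:00') -> 2.5
    #   (diff.days * 24 + diff.seconds / 3600, rounded to 2 decimals)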
    def _compute_is_highlighted(self):
        if self.env.context.get('active_model') == 'res.partner':
            partner_id = self.env.context.get('active_id')
            for event in self:
                if event.partner_ids.filtered(lambda s: s.id == partner_id):
                    event.is_highlighted = True
                else:
                    event.is_highlighted = False
        else:
            for event in self:
                event.is_highlighted = False
name = fields.Char('Meeting Subject', required=True, states={'done': [('readonly', True)]})
state = fields.Selection([('draft', 'Unconfirmed'), ('open', 'Confirmed')], string='Status', readonly=True, track_visibility='onchange', default='draft')
is_attendee = fields.Boolean('Attendee', compute='_compute_attendee')
attendee_status = fields.Selection(Attendee.STATE_SELECTION, string='Attendee Status', compute='_compute_attendee')
display_time = fields.Char('Event Time', compute='_compute_display_time')
display_start = fields.Char('Date', compute='_compute_display_start', store=True)
start = fields.Datetime('Start', required=True, help="Start date of an event, without time for full days events")
stop = fields.Datetime('Stop', required=True, help="Stop date of an event, without time for full days events")
allday = fields.Boolean('All Day', states={'done': [('readonly', True)]}, default=False)
start_date = fields.Date('Start Date', compute='_compute_dates', inverse='_inverse_dates', store=True, states={'done': [('readonly', True)]}, track_visibility='onchange')
start_datetime = fields.Datetime('Start DateTime', compute='_compute_dates', inverse='_inverse_dates', store=True, states={'done': [('readonly', True)]}, track_visibility='onchange')
stop_date = fields.Date('End Date', compute='_compute_dates', inverse='_inverse_dates', store=True, states={'done': [('readonly', True)]}, track_visibility='onchange')
stop_datetime = fields.Datetime('End Datetime', compute='_compute_dates', inverse='_inverse_dates', store=True, states={'done': [('readonly', True)]}, track_visibility='onchange') # old date_deadline
duration = fields.Float('Duration', states={'done': [('readonly', True)]})
description = fields.Text('Description', states={'done': [('readonly', True)]})
privacy = fields.Selection([('public', 'Everyone'), ('private', 'Only me'), ('confidential', 'Only internal users')], 'Privacy', default='public', states={'done': [('readonly', True)]}, oldname="class")
location = fields.Char('Location', states={'done': [('readonly', True)]}, track_visibility='onchange', help="Location of Event")
show_as = fields.Selection([('free', 'Free'), ('busy', 'Busy')], 'Show Time as', states={'done': [('readonly', True)]}, default='busy')
# linked document
res_id = fields.Integer('Document ID')
res_model_id = fields.Many2one('ir.model', 'Document Model', ondelete='cascade')
res_model = fields.Char('Document Model Name', related='res_model_id.model', readonly=True, store=True)
activity_ids = fields.One2many('mail.activity', 'calendar_event_id', string='Activities')
# RECURRENCE FIELD
rrule = fields.Char('Recurrent Rule', compute='_compute_rrule', inverse='_inverse_rrule', store=True)
rrule_type = fields.Selection([
('daily', 'Day(s)'),
('weekly', 'Week(s)'),
('monthly', 'Month(s)'),
('yearly', 'Year(s)')
], string='Recurrency', states={'done': [('readonly', True)]}, help="Let the event automatically repeat at that interval")
recurrency = fields.Boolean('Recurrent', help="Recurrent Meeting")
recurrent_id = fields.Integer('Recurrent ID')
recurrent_id_date = fields.Datetime('Recurrent ID date')
end_type = fields.Selection([
('count', 'Number of repetitions'),
('end_date', 'End date')
], string='Recurrence Termination', default='count')
interval = fields.Integer(string='Repeat Every', default=1, help="Repeat every (Days/Week/Month/Year)")
count = fields.Integer(string='Repeat', help="Repeat x times", default=1)
mo = fields.Boolean('Mon')
tu = fields.Boolean('Tue')
we = fields.Boolean('Wed')
th = fields.Boolean('Thu')
fr = fields.Boolean('Fri')
sa = fields.Boolean('Sat')
su = fields.Boolean('Sun')
month_by = fields.Selection([
('date', 'Date of month'),
('day', 'Day of month')
], string='Option', default='date', oldname='select1')
day = fields.Integer('Date of month', default=1)
week_list = fields.Selection([
('MO', 'Monday'),
('TU', 'Tuesday'),
('WE', 'Wednesday'),
('TH', 'Thursday'),
('FR', 'Friday'),
('SA', 'Saturday'),
('SU', 'Sunday')
], string='Weekday')
byday = fields.Selection([
('1', 'First'),
('2', 'Second'),
('3', 'Third'),
('4', 'Fourth'),
('5', 'Fifth'),
('-1', 'Last')
], string='By day')
final_date = fields.Date('Repeat Until')
user_id = fields.Many2one('res.users', 'Responsible', states={'done': [('readonly', True)]}, default=lambda self: self.env.user)
partner_id = fields.Many2one('res.partner', string='Responsible', related='user_id.partner_id', readonly=True)
active = fields.Boolean('Active', default=True, help="If the active field is set to false, it will allow you to hide the event alarm information without removing it.")
categ_ids = fields.Many2many('calendar.event.type', 'meeting_category_rel', 'event_id', 'type_id', 'Tags')
attendee_ids = fields.One2many('calendar.attendee', 'event_id', 'Participant', ondelete='cascade')
partner_ids = fields.Many2many('res.partner', 'calendar_event_res_partner_rel', string='Attendees', states={'done': [('readonly', True)]}, default=_default_partners)
alarm_ids = fields.Many2many('calendar.alarm', 'calendar_alarm_calendar_event_rel', string='Reminders', ondelete="restrict", copy=False)
is_highlighted = fields.Boolean(compute='_compute_is_highlighted', string='# Meetings Highlight')
@api.multi
def _compute_attendee(self):
for meeting in self:
attendee = meeting._find_my_attendee()
meeting.is_attendee = bool(attendee)
meeting.attendee_status = attendee.state if attendee else 'needsAction'
@api.multi
def _compute_display_time(self):
for meeting in self:
meeting.display_time = self._get_display_time(meeting.start, meeting.stop, meeting.duration, meeting.allday)
@api.multi
@api.depends('allday', 'start_date', 'start_datetime')
def _compute_display_start(self):
for meeting in self:
meeting.display_start = meeting.start_date if meeting.allday else meeting.start_datetime
@api.multi
@api.depends('allday', 'start', 'stop')
def _compute_dates(self):
""" Adapt the value of start_date(time)/stop_date(time) according to start/stop fields and allday. Also, compute
the duration for not allday meeting ; otherwise the duration is set to zero, since the meeting last all the day.
"""
for meeting in self:
if meeting.allday:
meeting.start_date = meeting.start
meeting.start_datetime = False
meeting.stop_date = meeting.stop
meeting.stop_datetime = False
meeting.duration = 0.0
else:
meeting.start_date = False
meeting.start_datetime = meeting.start
meeting.stop_date = False
meeting.stop_datetime = meeting.stop
meeting.duration = self._get_duration(meeting.start, meeting.stop)
@api.multi
def _inverse_dates(self):
for meeting in self:
if meeting.allday:
tz = pytz.timezone(self.env.user.tz) if self.env.user.tz else pytz.utc
enddate = fields.Datetime.from_string(meeting.stop_date)
enddate = tz.localize(enddate)
enddate = enddate.replace(hour=18)
enddate = enddate.astimezone(pytz.utc)
meeting.stop = fields.Datetime.to_string(enddate)
startdate = fields.Datetime.from_string(meeting.start_date)
startdate = tz.localize(startdate) # Add "+hh:mm" timezone
startdate = startdate.replace(hour=8) # Set 8 AM in localtime
startdate = startdate.astimezone(pytz.utc) # Convert to UTC
meeting.start = fields.Datetime.to_string(startdate)
else:
meeting.start = meeting.start_datetime
meeting.stop = meeting.stop_datetime
@api.depends('byday', 'recurrency', 'final_date', 'rrule_type', 'month_by', 'interval', 'count', 'end_type', 'mo', 'tu', 'we', 'th', 'fr', 'sa', 'su', 'day', 'week_list')
def _compute_rrule(self):
""" Gets Recurrence rule string according to value type RECUR of iCalendar from the values given.
:return dictionary of rrule value.
"""
for meeting in self:
if meeting.recurrency:
meeting.rrule = meeting._rrule_serialize()
else:
meeting.rrule = ''
@api.multi
def _inverse_rrule(self):
for meeting in self:
if meeting.rrule:
data = self._rrule_default_values()
data['recurrency'] = True
data.update(self._rrule_parse(meeting.rrule, data, meeting.start))
meeting.update(data)
@api.constrains('start_datetime', 'stop_datetime', 'start_date', 'stop_date')
def _check_closing_date(self):
for meeting in self:
if meeting.start_datetime and meeting.stop_datetime and meeting.stop_datetime < meeting.start_datetime:
raise ValidationError(_('Ending datetime cannot be set before starting datetime.'))
if meeting.start_date and meeting.stop_date and meeting.stop_date < meeting.start_date:
raise ValidationError(_('Ending date cannot be set before starting date.'))
@api.onchange('start_datetime', 'duration')
def _onchange_duration(self):
if self.start_datetime:
start = fields.Datetime.from_string(self.start_datetime)
self.start = self.start_datetime
self.stop = fields.Datetime.to_string(start + timedelta(hours=self.duration))
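    # E.g. (illustrative): start_datetime='2013-08-23 08:00:00' and duration=1.5
    # set stop to '2013-08-23 09:30:00' (start + timedelta(hours=duration)).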
####################################################
# Calendar Business, Reccurency, ...
####################################################
@api.multi
def get_ics_file(self):
""" Returns iCalendar file for the event invitation.
:returns a dict of .ics file content for each meeting
"""
result = {}
def ics_datetime(idate, allday=False):
if idate:
if allday:
return fields.Date.from_string(idate)
else:
return fields.Datetime.from_string(idate).replace(tzinfo=pytz.timezone('UTC'))
return False
try:
# FIXME: why isn't this in CalDAV?
import vobject
except ImportError:
_logger.warning("The `vobject` Python module is not installed, so iCal file generation is unavailable. Use 'pip install vobject' to install it")
return result
for meeting in self:
cal = vobject.iCalendar()
event = cal.add('vevent')
if not meeting.start or not meeting.stop:
raise UserError(_("First you have to specify the date of the invitation."))
event.add('created').value = ics_datetime(time.strftime(DEFAULT_SERVER_DATETIME_FORMAT))
event.add('dtstart').value = ics_datetime(meeting.start, meeting.allday)
event.add('dtend').value = ics_datetime(meeting.stop, meeting.allday)
event.add('summary').value = meeting.name
if meeting.description:
event.add('description').value = meeting.description
if meeting.location:
event.add('location').value = meeting.location
if meeting.rrule:
event.add('rrule').value = meeting.rrule
if meeting.alarm_ids:
for alarm in meeting.alarm_ids:
valarm = event.add('valarm')
interval = alarm.interval
duration = alarm.duration
trigger = valarm.add('TRIGGER')
trigger.params['related'] = ["START"]
if interval == 'days':
delta = timedelta(days=duration)
elif interval == 'hours':
delta = timedelta(hours=duration)
elif interval == 'minutes':
delta = timedelta(minutes=duration)
trigger.value = delta
valarm.add('DESCRIPTION').value = alarm.name or u'Odoo'
for attendee in meeting.attendee_ids:
attendee_add = event.add('attendee')
attendee_add.value = u'MAILTO:' + (attendee.email or u'')
result[meeting.id] = cal.serialize().encode('utf-8')
return result
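    # For reference (illustrative only; the exact output depends on vobject and the record),
    # the serialized payload roughly looks like:
    #   BEGIN:VCALENDAR
    #   VERSION:2.0
    #   BEGIN:VEVENT
    #   DTSTART:20130823T080000Z
    #   DTEND:20130823T103000Z
    #   SUMMARY:Project kickoff
    #   END:VEVENT
    #   END:VCALENDAR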
@api.multi
def create_attendees(self):
current_user = self.env.user
result = {}
for meeting in self:
            already_meeting_partners = meeting.attendee_ids.mapped('partner_id')
meeting_attendees = self.env['calendar.attendee']
meeting_partners = self.env['res.partner']
            for partner in meeting.partner_ids.filtered(lambda partner: partner not in already_meeting_partners):
values = {
'partner_id': partner.id,
'email': partner.email,
'event_id': meeting.id,
}
                # the current user doesn't have to accept their own meeting
if partner == self.env.user.partner_id:
values['state'] = 'accepted'
attendee = self.env['calendar.attendee'].create(values)
meeting_attendees |= attendee
meeting_partners |= partner
if meeting_attendees:
to_notify = meeting_attendees.filtered(lambda a: a.email != current_user.email)
to_notify._send_mail_to_attendees('calendar.calendar_template_meeting_invitation')
meeting.write({'attendee_ids': [(4, meeting_attendee.id) for meeting_attendee in meeting_attendees]})
if meeting_partners:
meeting.message_subscribe(partner_ids=meeting_partners.ids)
# We remove old attendees who are not in partner_ids now.
all_partners = meeting.partner_ids
all_partner_attendees = meeting.attendee_ids.mapped('partner_id')
old_attendees = meeting.attendee_ids
partners_to_remove = all_partner_attendees + meeting_partners - all_partners
attendees_to_remove = self.env["calendar.attendee"]
if partners_to_remove:
attendees_to_remove = self.env["calendar.attendee"].search([('partner_id', 'in', partners_to_remove.ids), ('event_id', '=', meeting.id)])
attendees_to_remove.unlink()
result[meeting.id] = {
'new_attendees': meeting_attendees,
'old_attendees': old_attendees,
'removed_attendees': attendees_to_remove,
'removed_partners': partners_to_remove
}
return result
@api.multi
def get_search_fields(self, order_fields, r_date=None):
sort_fields = {}
for field in order_fields:
if field == 'id' and r_date:
sort_fields[field] = real_id2calendar_id(self.id, r_date)
else:
sort_fields[field] = self[field]
if isinstance(self[field], models.BaseModel):
name_get = self[field].name_get()
if len(name_get) and len(name_get[0]) >= 2:
sort_fields[field] = name_get[0][1]
if r_date:
sort_fields['sort_start'] = r_date.strftime(VIRTUALID_DATETIME_FORMAT)
else:
display_start = self.display_start
sort_fields['sort_start'] = display_start.replace(' ', '').replace('-', '') if display_start else False
return sort_fields
@api.multi
def get_recurrent_ids(self, domain, order=None):
""" Gives virtual event ids for recurring events. This method gives ids of dates
that comes between start date and end date of calendar views
:param order: The fields (comma separated, format "FIELD {DESC|ASC}") on which
the events should be sorted
"""
if order:
order_fields = [field.split()[0] for field in order.split(',')]
else:
# fallback on self._order defined on the model
order_fields = [field.split()[0] for field in self._order.split(',')]
if 'id' not in order_fields:
order_fields.append('id')
result_data = []
result = []
for meeting in self:
if not meeting.recurrency or not meeting.rrule:
result.append(meeting.id)
result_data.append(meeting.get_search_fields(order_fields))
continue
rdates = meeting._get_recurrent_dates_by_event()
for r_start_date, r_stop_date in rdates:
                # Evaluate the domain against this occurrence:
                # step 1: replace each date condition by True or False, and every other leaf by True
                # step 2: evaluate the '&' and '|' operators
                # and skip the occurrence if at least one False remains
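                # Illustrative example (not in the original code): with the domain
                # ['&', ('start', '>=', '2013-08-01'), ('stop', '<=', '2013-08-31')],
                # both date leaves are replaced by the boolean result of comparing the
                # occurrence dates, '&' is kept, and the prefix expression is evaluated below.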
pile = []
ok = True
r_date = r_start_date # default for empty domain
for arg in domain:
if str(arg[0]) in ('start', 'stop', 'final_date'):
if str(arg[0]) == 'start':
r_date = r_start_date
else:
r_date = r_stop_date
if arg[2] and len(arg[2]) > len(r_date.strftime(DEFAULT_SERVER_DATE_FORMAT)):
dformat = DEFAULT_SERVER_DATETIME_FORMAT
else:
dformat = DEFAULT_SERVER_DATE_FORMAT
if (arg[1] == '='):
ok = r_date.strftime(dformat) == arg[2]
if (arg[1] == '>'):
ok = r_date.strftime(dformat) > arg[2]
if (arg[1] == '<'):
ok = r_date.strftime(dformat) < arg[2]
if (arg[1] == '>='):
ok = r_date.strftime(dformat) >= arg[2]
if (arg[1] == '<='):
ok = r_date.strftime(dformat) <= arg[2]
if (arg[1] == '!='):
ok = r_date.strftime(dformat) != arg[2]
pile.append(ok)
elif str(arg) == str('&') or str(arg) == str('|'):
pile.append(arg)
else:
pile.append(True)
pile.reverse()
new_pile = []
for item in pile:
if not isinstance(item, pycompat.string_types):
res = item
elif str(item) == str('&'):
first = new_pile.pop()
second = new_pile.pop()
res = first and second
elif str(item) == str('|'):
first = new_pile.pop()
second = new_pile.pop()
res = first or second
new_pile.append(res)
if [True for item in new_pile if not item]:
continue
result_data.append(meeting.get_search_fields(order_fields, r_date=r_start_date))
# seq of (field, should_reverse)
sort_spec = list(tools.unique(
(sort_remap(key.split()[0]), key.lower().endswith(' desc'))
for key in (order or self._order).split(',')
))
def key(record):
return [
tools.Reverse(record[name]) if desc else record[name]
for name, desc in sort_spec
]
return [r['id'] for r in sorted(result_data, key=key)]
@api.multi
def _rrule_serialize(self):
""" Compute rule string according to value type RECUR of iCalendar
:return: string containing recurring rule (empty if no rule)
"""
        if self.interval and self.interval < 0:
            raise UserError(_('The interval cannot be negative.'))
        if self.count and self.count <= 0:
            raise UserError(_('The recurrence count must be positive.'))
def get_week_string(freq):
weekdays = ['mo', 'tu', 'we', 'th', 'fr', 'sa', 'su']
if freq == 'weekly':
byday = [field.upper() for field in weekdays if self[field]]
if byday:
return ';BYDAY=' + ','.join(byday)
return ''
def get_month_string(freq):
if freq == 'monthly':
if self.month_by == 'date' and (self.day < 1 or self.day > 31):
raise UserError(_("Please select a proper day of the month."))
if self.month_by == 'day' and self.byday and self.week_list: # Eg : Second Monday of the month
return ';BYDAY=' + self.byday + self.week_list
elif self.month_by == 'date': # Eg : 16th of the month
return ';BYMONTHDAY=' + str(self.day)
return ''
def get_end_date():
            end_date_new = ''.join((re.compile(r'\d')).findall(self.final_date)) + 'T235959Z' if self.final_date else False
return (self.end_type == 'count' and (';COUNT=' + str(self.count)) or '') +\
((end_date_new and self.end_type == 'end_date' and (';UNTIL=' + end_date_new)) or '')
freq = self.rrule_type # day/week/month/year
result = ''
if freq:
            interval_string = self.interval and (';INTERVAL=' + str(self.interval)) or ''
            result = 'FREQ=' + freq.upper() + get_week_string(freq) + interval_string + get_end_date() + get_month_string(freq)
return result
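    # Example output (illustrative): a meeting recurring every 2 weeks on Monday and
    # Friday, stopping after 10 occurrences, serializes to
    #   'FREQ=WEEKLY;BYDAY=MO,FR;INTERVAL=2;COUNT=10'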
def _rrule_default_values(self):
return {
'byday': False,
'recurrency': False,
'final_date': False,
'rrule_type': False,
'month_by': False,
'interval': 0,
'count': False,
'end_type': False,
'mo': False,
'tu': False,
'we': False,
'th': False,
'fr': False,
'sa': False,
'su': False,
'day': False,
'week_list': False
}
def _rrule_parse(self, rule_str, data, date_start):
day_list = ['mo', 'tu', 'we', 'th', 'fr', 'sa', 'su']
rrule_type = ['yearly', 'monthly', 'weekly', 'daily']
rule = rrule.rrulestr(rule_str, dtstart=fields.Datetime.from_string(date_start))
if rule._freq > 0 and rule._freq < 4:
            data['rrule_type'] = rrule_type[rule._freq]
|
#! /usr/bin/env python
from miasm.loader.cstruct import CStruct
class Ehdr(CStruct):
_fields = [ ("ident","16s"),
("type","u16"),
("machine","u16"),
("version","u32"),
("entry","ptr"),
("phoff","ptr"),
("shoff","ptr"),
("flags","u32"),
("ehsize","u16"),
("phentsize","u16"),
("phnum","u16"),
("shentsize","u16"),
("shnum","u16"),
("shstrndx","u16") ]
class Shdr(CStruct):
_fields = [ ("name","u32"),
("type","u32"),
("flags","ptr"),
("addr","ptr"),
("offset","ptr"),
("size","ptr"),
("link","u32"),
("info","u32"),
("addralign","ptr"),
("entsize","ptr") ]
class Phdr(CStruct):
_fields = [ ("type","u32"),
("offset","u32"),
("vaddr","u32"),
("paddr","u32"),
("filesz","u32"),
("memsz","u32"),
("flags","u32"),
("align","u32") ]
class Phdr64(CStruct):
_fields = [ ("type","u32"),
("flags","u32"),
("offset","ptr"),
("vaddr","ptr"),
("paddr","ptr"),
("filesz","ptr"),
("memsz","ptr"),
("align","ptr") ]
class Nhdr(CStruct):
_fields = [ ("namesz","u32"),
("descsz","u32"),
("type", "u32") ]
class Sym32(CStruct):
_fields = [ ("name","u32"),
("value","u32"),
("size","u32"),
("info","u08"),
("other","u08"),
("shndx","u16") ]
class Sym64(CStruct):
_fields = [ ("name","u32"),
("info","u08"),
("other","u08"),
("shndx","u16"),
("value","u64"),
("size","u64") ]
class Dym(CStruct):
_fields = [ ("tag","u32"),
("val","u32") ]
class Rel32(CStruct):
_fields = [ ("offset","ptr"),
("info","u32") ]
class Rel64(CStruct):
_fields = [ ("offset","ptr"),
("info","u64") ]
class Rela32(CStruct):
_fields = [ ("offset","ptr"),
("info","u32"),
("addend","ptr") ]
class Rela64(CStruct):
_fields = [ ("offset","ptr"),
("info","u64"),
("addend","ptr") ]
class Dynamic(CStruct):
_fields = [ ("type","ptr"),
("name","ptr") ]
# Legal values for e_ident (identification indexes)
EI_MAG0 = 0 # File identification
EI_MAG1 = 1 # File identification
EI_MAG2 = 2 # File identification
EI_MAG3 = 3 # File identification
EI_CLASS = 4 # File class
EI_DATA = 5 # Data encoding
EI_VERSION = 6 # File version
EI_OSABI = 7 # Operating system/ABI identification
EI_ABIVERSION = 8 # ABI version
EI_PAD = 9 # Start of padding bytes
EI_NIDENT = 16 # Size of e_ident[]
# Legal values for e_ident[EI_CLASS]
ELFCLASSNONE = 0 # Invalid class
ELFCLASS32 = 1 # 32-bit objects
ELFCLASS64 = 2 # 64-bit objects
# Legal values for e_ident[EI_DATA]
ELFDATANONE = 0 # Invalid data encoding
ELFDATA2LSB = 1 # Least significant byte at lowest address
ELFDATA2MSB = 2 # Most significant byte at lowest address
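# E.g. (illustrative): for a little-endian 64-bit ELF, ident[EI_CLASS] == ELFCLASS64
# and ident[EI_DATA] == ELFDATA2LSB.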
# Legal values for e_type (object file type).
ET_NONE = 0 # No file type
ET_REL = 1 # Relocatable file
ET_EXEC = 2 # Executable file
ET_DYN = 3 # Shared object file
ET_CORE = 4 # Core file
ET_NUM = 5 # Number of defined types
ET_LOOS = 0xfe00 # OS-specific range start
ET_HIOS = 0xfeff # OS-specific range end
ET_LOPROC = 0xff00 # Processor-specific range start
ET_HIPROC = 0xffff # Processor-specific range end
# Legal values for e_machine (architecture).
EM_NONE = 0 # No machine
EM_M32 = 1 # AT&T WE 32100
EM_SPARC = 2 # SUN SPARC
EM_386 = 3 # Intel 80386
EM_68K = 4 # Motorola m68k family
EM_88K = 5 # Motorola m88k family
EM_486 = 6 # Intel 80486
EM_860 = 7 # Intel 80860
EM_MIPS = 8 # MIPS R3000 big-endian
EM_S370 = 9 # IBM System/370
EM_MIPS_RS3_LE = 10 # MIPS R3000 little-endian
EM_PARISC = 15 # HPPA
EM_VPP500 = 17 # Fujitsu VPP500
EM_SPARC32PLUS = 18 # Sun's "v8plus"
EM_960 = 19 # Intel 80960
EM_PPC = 20 # PowerPC
EM_PPC64 = 21 # PowerPC 64-bit
EM_S390 = 22 # IBM S390
EM_V800 = 36 # NEC V800 series
EM_FR20 = 37 # Fujitsu FR20
EM_RH32 = 38 # TRW RH-32
EM_RCE = 39 # Motorola RCE
EM_ARM = 40 # ARM
EM_FAKE_ALPHA = 41 # Digital Alpha
EM_SH = 42 # Hitachi SH
EM_SPARCV9 = 43 # SPARC v9 64-bit
EM_TRICORE = 44 # Siemens Tricore
EM_ARC = 45 # Argonaut RISC Core
EM_H8_300 = 46 # Hitachi H8/300
EM_H8_300H = 47 # Hitachi H8/300H
EM_H8S = 48 # Hitachi H8S
EM_H8_500 = 49 # Hitachi H8/500
EM_IA_64 = 50 # Intel Merced
EM_MIPS_X = 51 # Stanford MIPS-X
EM_COLDFIRE = 52 # Motorola Coldfire
EM_68HC12 = 53 # Motorola M68HC12
EM_MMA = 54 # Fujitsu MMA Multimedia Accelerator
EM_PCP = 55 # Siemens PCP
EM_NCPU = 56 # Sony nCPU embedded RISC
EM_NDR1 = 57 # Denso NDR1 microprocessor
EM_STARCORE = 58 # Motorola Start*Core processor
EM_ME16 = 59 # Toyota ME16 processor
EM_ST100 = 60 # STMicroelectronic ST100 processor
EM_TINYJ = 61 # Advanced Logic Corp. TinyJ embedded processor family
EM_X86_64 = 62 # AMD x86-64 architecture
EM_AARCH64 = 183 # Aarch64 architecture
EM_PDSP = 63 # Sony DSP Processor
EM_FX66 = 66 # Siemens FX66 microcontroller
EM_ST9PLUS = 67 # STMicroelectronics ST9+ 8/16 mc
EM_ST7 = 68 # STmicroelectronics ST7 8 bit mc
EM_68HC16 = 69 # Motorola MC68HC16 microcontroller
EM_68HC11 = 70 # Motorola MC68HC11 microcontroller
EM_68HC08 = 71 # Motorola MC68HC08 microcontroller
EM_68HC05 = 72 # Motorola MC68HC05 microcontroller
EM_SVX = 73 # Silicon Graphics SVx
EM_ST19 = 74 # STMicroelectronics ST19 8 bit mc
EM_VAX = 75 # Digital VAX
EM_CRIS = 76 # Axis Communications 32-bit embedded processor
EM_JAVELIN = 77 # Infineon Technologies 32-bit embedded processor
EM_FIREPATH = 78 # Element 14 64-bit DSP Processor
EM_ZSP = 79 # LSI Logic 16-bit DSP Processor
EM_MMIX = 80 # Donald Knuth's educational 64-bit processor
EM_HUANY = 81 # Harvard University machine-independent object files
EM_PRISM = 82 # SiTera Prism
EM_AVR = 83 # Atmel AVR 8-bit microcontroller
EM_FR30 = 84 # Fujitsu FR30
EM_D10V = 85 # Mitsubishi D10V
EM_D30V = 86 # Mitsubishi D30V
EM_V850 = 87 # NEC v850
EM_M32R = 88 # Mitsubishi M32R
EM_MN10300 = 89 # Matsushita MN10300
EM_MN10200 = 90 # Matsushita MN10200
EM_PJ = 91 # picoJava
EM_OPENRISC = 92 # OpenRISC 32-bit embedded processor
EM_ARC_A5 = 93 # ARC Cores Tangent-A5
EM_XTENSA = 94 # Tensilica Xtensa Architecture
EM_ALPHA = 0x9026
# Legal values for sh_type (section type).
SHT_NULL = 0 # Section header table entry unused
SHT_PROGBITS = 1 # Program data
SHT_SYMTAB = 2 # Symbol table
SHT_STRTAB = 3 # String table
SHT_RELA = 4 # Relocation entries with addends
SHT_HASH = 5 # Symbol hash table
SHT_DYNAMIC = 6 # Dynamic linking information
SHT_NOTE = 7 # Notes
SHT_NOBITS = 8 # Program space with no data (bss)
SHT_REL = 9 # Relocation entries, no addends
SHT_SHLIB = 10 # Reserved
SHT_DYNSYM = 11 # Dynamic linker symbol table
SHT_INIT_ARRAY = 14 # Array of constructors
SHT_FINI_ARRAY = 15 # Array of destructors
SHT_PREINIT_ARRAY = 16 # Array of pre-constructors
SHT_GROUP = 17 # Section group
SHT_SYMTAB_SHNDX = 18 # Extended section indices
SHT_NUM = 19 # Number of defined types.
SHT_LOOS = 0x60000000 # Start OS-specific
SHT_GNU_LIBLIST = 0x6ffffff7 # Prelink library list
SHT_CHECKSUM = 0x6ffffff8 # Checksum for DSO content.
SHT_LOSUNW = 0x6ffffffa # Sun-specific low bound.
SHT_SUNW_move = 0x6ffffffa
SHT_SUNW_COMDAT = 0x6ffffffb
SHT_SUNW_syminfo = 0x6ffffffc
SHT_GNU_verdef = 0x6ffffffd # Version definition section.
SHT_GNU_verneed = 0x6ffffffe # Version needs section.
SHT_GNU_versym = 0x6fffffff # Version symbol table.
SHT_HISUNW = 0x6fffffff # Sun-specific high bound.
SHT_HIOS = 0x6fffffff # End OS-specific type
SHT_LOPROC = 0x70000000 # Start of processor-specific
SHT_HIPROC = 0x7fffffff # End of processor-specific
SHT_LOUSER = 0x80000000 # Start of application-specific
SHT_HIUSER = 0x8fffffff # End of application-specific
# Legal values for sh_flags (section flags).
SHF_WRITE = (1 << 0) # Writable
SHF_ALLOC = (1 << 1) # Occupies memory during execution
SHF_EXECINSTR = (1 << 2) # Executable
SHF_MERGE = (1 << 4) # Might be merged
SHF_STRINGS = (1 << 5) # Contains nul-terminated strings
SHF_INFO_LINK = (1 << 6) # `sh_info' contains SHT index
SHF_LINK_ORDER = (1 << 7) # Preserve order after combining
SHF_OS_NONCONFORMING = (1 << 8) # Non-standard OS specific handling required
SHF_GROUP = (1 << 9) # Section is member of a group.
SHF_TLS = (1 << 10) # Section hold thread-local data.
SHF_MASKOS = 0x0ff00000 # OS-specific.
SHF_MASKPROC = 0xf0000000 # Processor-specific
# Section group handling.
GRP_COMDAT = 0x1 # Mark group as COMDAT.
# Legal values for p_type (segment type).
PT_NULL = 0 # Program header table entry unused
PT_LOAD = 1 # Loadable program segment
PT_DYNAMIC = 2 # Dynamic linking information
PT_INTERP = 3 # Program interpreter
PT_NOTE = 4 # Auxiliary information
PT_SHLIB = 5 # Reserved
PT_PHDR = 6 # Entry for header table itself
PT_TLS = 7 # Thread-local storage segment
PT_NUM = 8 # Number of defined types
PT_LOOS = 0x60000000 # Start of OS-specific
PT_GNU_EH_FRAME = 0x6474e550 # GCC .eh_frame_hdr segment
PT_GNU_STACK = 0x6474e551 # Indicates stack executability
PT_LOSUNW = 0x6ffffffa
PT_SUNWBSS = 0x6ffffffa # Sun Specific segment
PT_SUNWSTACK = 0x6ffffffb # Stack segment
PT_HISUNW = 0x6fffffff
PT_HIOS = 0x6fffffff # End of OS-specific
PT_LOPROC = 0x70000000 # Start of processor-specific
PT_HIPROC = 0x7fffffff # End of processor-specific
# Legal values for p_flags (segment flags).
PF_X = (1 << 0) # Segment is executable
PF_W = (1 << 1) # Segment is writable
PF_R = (1 << 2) # Segment is readable
PF_MASKOS = 0x0ff00000 # OS-specific
PF_MASKPROC = 0xf0000000 # Processor-specific
# Legal values for note segment descriptor types for core files.
NT_PRSTATUS = 1 # Contains copy of prstatus struct
NT_FPREGSET = 2 # Contains copy of fpregset struct
NT_PRPSINFO = 3 # Contains copy of prpsinfo struct
NT_PRXREG = 4 # Contains copy of prxregset struct
NT_TASKSTRUCT = 4 # Contains copy of task structure
NT_PLATFORM = 5 # String from sysinfo(SI_PLATFORM)
NT_AUXV = 6 # Contains copy of auxv array
NT_GWINDOWS = 7 # Contains copy of gwindows struct
NT_ASRS = 8 # Contains copy of asrset struct
NT_PSTATUS = 10 # Contains copy of pstatus struct
NT_PSINFO = 13 # Contains copy of psinfo struct
NT_PRCRED = 14 # Contains copy of prcred struct
NT_UTSNAME = 15 # Contains copy of utsname struct
NT_LWPSTATUS = 16 # Contains copy of lwpstatus struct
NT_LWPSINFO = 17 # Contains copy of lwpinfo struct
NT_PRFPXREG = 20 # Contains copy of fprxregset struct
# Legal values for the note segment descriptor types for object files.
NT_VERSION = 1 # Contains a version string.
# Legal values for ST_BIND subfield of st_info (symbol binding).
# bind = Sym.info >> 4
# val = Sym.info & 0xf
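# Illustrative sketch (not part of the original header), for a parsed symbol `sym`
# (hypothetical variable):
#   bind = sym.info >> 4     # one of the STB_* values below
#   typ  = sym.info & 0xf    # one of the STT_* values below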
STB_LOCAL = 0 # Local symbol
STB_GLOBAL = 1 # Global symbol
STB_WEAK = 2 # Weak symbol
STB_NUM = 3 # Number of defined types.
STB_LOOS = 10 # Start of OS-specific
STB_HIOS = 12 # End of OS-specific
STB_LOPROC = 13 # Start of processor-specific
STB_HIPROC = 15 # End of processor-specific
#Legal values for ST_TYPE subfield of st_info (symbol type).
STT_NOTYPE = 0 # Symbol type is unspecified
STT_OBJECT = 1 # Symbol is a data object
STT_FUNC = 2 # Symbol is a code object
STT_SECTION = 3 # Symbol associated with a section
STT_FILE = 4 # Symbol's name is file name
STT_COMMON = 5 # Symbol is a common data object
STT_TLS = 6 # Symbol is thread-local data object
STT_NUM = 7 # Number of defined types.
STT_LOOS = 10 # Start of OS-specific
STT_GNU_IFUNC = 10 # Symbol is indirect code object
STT_HIOS = 12 # End of OS-specific
STT_LOPROC = 13 # Start of processor-specific
STT_HIPROC = 15 # End of processor-specific
# Legal values for d_tag (dynamic entry type).
DT_NULL = 0 # Marks end of dynamic section
DT_NEEDED = 1 # Name of needed library
DT_PLTRELSZ = 2 # Size in bytes of PLT relocs
DT_PLTGOT = 3 # Processor defined value
DT_HASH = 4 # Address of symbol hash table
DT_STRTAB = 5 # Address of string table
DT_SYMTAB = 6 # Address of symbol table
DT_RELA = 7 # Address of Rela relocs
DT_RELASZ = 8 # Total size of Rela relocs
DT_RELAENT = 9 # Size of one Rela reloc
DT_STRSZ = 10 # Size of string table
DT_SYMENT = 11 # Size of one symbol table entry
DT_INIT = 12 # Address of init function
DT_FINI = 13 # Address of termination function
DT_SONAME = 14 # Name of shared object
DT_RPATH = 15 # Library search path (deprecated)
DT_SYMBOLIC = 16 # Start symbol search here
DT_REL = 17 # Address of Rel relocs
DT_RELSZ = 18 # Total size of Rel relocs
DT_RELENT = 19 # Size of one Rel reloc
DT_PLTREL = 20 # Type of reloc in PLT
DT_DEBUG = 21 # For debugging; unspecified
DT_TEXTREL = 22 # Reloc might modify .text
DT_JMPREL = 23 # Address of PLT relocs
DT_BIND_NOW = 24 # Process relocations of object
DT_INIT_ARRAY = 25 # Array with addresses of init fct
DT_FINI_ARRAY = 26 # Array with addresses of fini fct
DT_INIT_ARRAYSZ = 27 # Size in bytes of DT_INIT_ARRAY
DT_FINI_ARRAYSZ = 28 # Size in bytes of DT_FINI_ARRAY
DT_RUNPATH = 29 # Library search path
DT_FLAGS = 30 # Flags for the object being loaded
DT_ENCODING = 32 # Start of encoded range
DT_PREINIT_ARRAY = 32 # Array with addresses of preinit fct
DT_PREINIT_ARRAYSZ = 33 # size in bytes of DT_PREINIT_ARRAY
DT_NUM = 34 # Number used
DT_LOOS = 0x6000000d # Start of OS-specific
DT_HIOS = 0x6ffff000 # End of OS-specific
DT_LOPROC = 0x70000000 # Start of processor-specific
DT_HIPROC = 0x7fffffff # End of processor-specific
#DT_PROCNUM = DT_MIPS_NUM # Most used by any processor
# DT_* entries which fall between DT_VALRNGHI & DT_VALRNGLO use the
# Dyn.d_un.d_val field of the Elf*_Dyn structure. This follows Sun's
# approach.
DT_VALRNGLO = 0x6ffffd00
DT_GNU_PRELINKED = 0x6ffffdf5 # Prelinking timestamp
DT_GNU_CONFLICTSZ = 0x6ffffdf6 # Size of conflict section
DT_GNU_LIBLISTSZ = 0x6ffffdf7 # Size of library list
DT_CHECKSUM = 0x6ffffdf8
DT_PLTPADSZ = 0x6ffffdf9
DT_MOVEENT = 0x6ffffdfa
DT_MOVESZ = 0x6ffffdfb
DT_FEATURE_1 = 0x6ffffdfc # Feature selection (DTF_*).
DT_POSFLAG_1 = 0x6ffffdfd # Flags for DT_* entries, affecting the following DT_* entry.
DT_SYMINSZ = 0x6ffffdfe # Size of syminfo table (in bytes)
DT_SYMINENT = 0x6ffffdff # Entry size of syminfo
DT_VALRNGHI = 0x6ffffdff
DT_VALNUM = 12
# DT_* entries which fall between DT_ADDRRNGHI & DT_ADDRRNGLO use the
# Dyn.d_un.d_ptr field of the Elf*_Dyn structure.
#
# If any adjustment is made to the ELF object after it has been
# built these entries will need to be adjusted.
DT_ADDRRNGLO = 0x6ffffe00
DT_GNU_CONFLICT = 0x6ffffef8 # Start of conflict section
DT_GNU_LIBLIST = 0x6ffffef9 # Library list
DT_CONFIG = 0x6ffffefa # Configuration information.
DT_DEPAUDIT = 0x6ffffefb # Dependency auditing.
DT_AUDIT = 0x6ffffefc # Object auditing.
DT_PLTPAD = 0x6ffffefd # PLT padding.
DT_MOVETAB = 0x6ffffefe # Move table.
DT_SYMINFO = 0x6ffffeff # Syminfo table.
DT_ADDRRNGHI = 0x6ffffeff
DT_ADDRNUM = 10
# The versioning entry types. The next are defined as part of the
# GNU extension.
DT_VERSYM = 0x6ffffff0
DT_RELACOUNT = 0x6ffffff9
DT_RELCOUNT = 0x6ffffffa
# These were chosen by Sun.
DT_FLAGS_1 = 0x6ffffffb # State flags, see DF_1_* below.
DT_VERDEF = 0x6ffffffc # Address of version definition table
DT_VERDEFNUM = 0x6ffffffd # Number of version definitions
DT_VERNEED = 0x6ffffffe # Address of table with needed versions
DT_VERNEEDNUM = 0x6fffffff # Number of needed versions
DT_VERSIONTAGNUM = 16
# Sun added these machine-independent extensions in the "processor-specific"
# range. Be compatible.
DT_AUXILIARY = 0x7ffffffd # Shared object to load before self
DT_FILTER = 0x7fffffff # Shared object to get values from
DT_EXTRANUM = 3
# Values of `d_un.d_val' in the DT_FLAGS entry.
DF_ORIGIN = 0x00000001 # Object may use DF_ORIGIN
DF_SYMBOLIC = 0x00000002 # Symbol resolutions starts here
DF_TEXTREL = 0x00000004 # Object contains text relocations
DF_BIND_NOW = 0x00000008 # No lazy binding for this object
DF_STATIC_TLS = 0x00000010 # Module uses the static TLS model
# State flags selectable in the `d_un.d_val' element of the DT_FLAGS_1
# entry in the dynamic section.
DF_1_NOW = 0x00000001 # Set RTLD_NOW for this object.
DF_1_GLOBAL = 0x00000002 # Set RTLD_GLOBAL for this object.
DF_1_GROUP = 0x00000004 # Set RTLD_GROUP for this object.
DF_1_NODELETE = 0x00000008 # Set RTLD_NODELETE for this object.
DF_1_LOADFLTR = 0x00000010 # Trigger filtee loading at runtime.
DF_1_INITFIRST = 0x00000020 # Set RTLD_INITFIRST for this object
DF_1_NOOPEN = 0x00000040 # Set RTLD_NOOPEN for this object.
DF_1_ORIGIN = 0x00000080 # $ORIGIN must be handled.
DF_1_DIRECT = 0x00000100 # Direct binding enabled.
DF_1_TRANS = 0x00000200
DF_1_INTERPOSE = 0x00000400 # Object is used to interpose.
DF_1_NODEFLIB = 0x00000800 # Ignore default lib search path.
DF_1_NODUMP = 0x00001000 # Object can't be dldump'ed.
DF_1_CONFALT = 0x00002000 # Configuration alternative created.
DF_1_ENDFILTEE = 0x00004000 # Filtee terminates filters search.
DF_1_DISPRELDNE = 0x00008000 # Disp reloc applied at build time.
DF_1_DISPRELPND = 0x00010000 # Disp reloc applied at run-time.
# Flags for the feature selection in DT_FEATURE_1.
DTF_1_PARINIT = 0x00000001
DTF_1_CONFEXP = 0x00000002
# Flags in the DT_POSFLAG_1 entry affecting only the next DT_* entry.
DF_P1_LAZYLOAD = 0x00000001 # Lazyload following object.
DF_P1_GROUPPERM = 0x00000002 # Symbols from next object are not generally available.
# GNU Versioning
VER_FLG_BASE = 1 # Version of the file itself, must not be used to match symbols
VER_FLG_WEAK = 2 # Reference to this version is weak
VER_NEED_CURRENT = 1 # Versioning implementation number
# Relocs
# Motorola 68k relocations
R_68K_NONE = 0 # No reloc
R_68K_32 = 1 # Direct 32 bit
R_68K_16 = 2 # Direct 16 bit
R_68K_8 = 3 # Direct 8 bit
R_68K_PC32 = 4 # PC relative 32 bit
R_68K_PC16 = 5 # PC relative 16 bit
R_68K_PC8 = 6 # PC relative 8 bit
R_68K_GOT32 = 7 # 32 bit PC relative GOT entry
R_68K_GOT16 = 8 # 16 bit PC relative GOT entry
R_68K_GOT8 = 9 # 8 bit PC relative GOT entry
R_68K_GOT32O = 10 # 32 bit GOT offset
R_68K_GOT16O = 11 # 16 bit GOT offset
R_68K_GOT8O = 12 # 8 bit GOT offset
R_68K_PLT32 = 13 # 32 bit PC relative PLT address
R_68K_PLT16 = 14 # 16 bit PC relative PLT address
R_68K_PLT8 = 15 # 8 bit PC relative PLT address
R_68K_PLT32O = 16 # 32 bit PLT offset
R_68K_PLT16O = 17 # 16 bit PLT offset
R_68K_PLT8O = 18 # 8 bit PLT offset
R_68K_COPY = 19 # Copy symbol at runtime
R_68K_GLOB_DAT = 20 # Create GOT entry
R_68K_JMP_SLOT = 21 # Create PLT entry
R_68K_RELATIVE = 22 # Adjust by program base
R_68K_TLS_GD32 = 25 # 32 bit GOT offset for GD
R_68K_TLS_GD16 = 26 # 16 bit GOT offset for GD
R_68K_TLS_GD8 = 27 # 8 bit GOT offset for GD
R_68K_TLS_LDM32 = 28 # 32 bit GOT offset for LDM
R_68K_TLS_LDM16 = 29 # 16 bit GOT offset for LDM
R_68K_TLS_LDM8 = 30 # 8 bit GOT offset for LDM
R_68K_TLS_LDO32 = 31 # 32 bit module-relative offset
R_68K_TLS_LDO16 = 32 # 16 bit module-relative offset
R_68K_TLS_LDO8 = 33 # 8 bit module-relative offset
R_68K_TLS_IE32 = 34 # 32 bit GOT offset for IE
R_68K_TLS_IE16 = 35 # 16 bit GOT offset for IE
R_68K_TLS_IE8 = 36 # 8 bit GOT offset for IE
R_68K_TLS_LE32 = 37 # 32 bit offset relative to static TLS block
R_68K_TLS_LE16 = 38 # 16 bit offset relative to static TLS block
R_68K_TLS_LE8 = 39 # 8 bit offset relative to static TLS block
R_68K_TLS_DTPMOD32 = 40 # 32 bit module number
R_68K_TLS_DTPREL32 = 41 # 32 bit module-relative offset
R_68K_TLS_TPREL32 = 42 # 32 bit TP-relative offset
# Keep this the last entry.
R_68K_NUM = 43
# Intel 80386 relocations
R_386_NONE = 0 # No reloc
R_386_32 = 1 # Direct 32 bit
R_386_PC32 = 2 # PC relative 32 bit
R_386_GOT32 = 3 # 32 bit GOT entry
R_386_PLT32 = 4 # 32 bit PLT address
R_386_COPY = 5 # Copy symbol at runtime
R_386_GLOB_DAT = 6 # Create GOT entry
R_386_JMP_SLOT = 7 # Create PLT entry
R_386_RELATIVE = 8 # Adjust by program base
R_386_GOTOFF = 9 # 32 bit offset to GOT
R_386_GOTPC = 10 # 32 bit PC relative offset to GOT
R_386_32PLT = 11
R_386_TLS_TPOFF = 14 # Offset in static TLS block
R_386_TLS_IE = 15 # Address of GOT entry for static TLS block offset
R_386_TLS_GOTIE = 16 # GOT entry for static TLS block offset
R_386_TLS_LE = 17 # Offset relative to static TLS block
R_386_TLS_GD = 18 # Direct 32 bit for GNU version of general dynamic thread local data
R_386_TLS_LDM = 19 # Direct 32 bit for GNU version of local dynamic thread local data in LE code
R_386_16 = 20
R_386_PC16 = 21
R_386_8 = 22
R_386_PC8 = 23
R_386_TLS_GD_32 = 24 # Direct 32 bit for general dynamic thread local data
R_386_TLS_GD_PUSH = 25 # Tag for pushl in GD TLS code
R_386_TLS_GD_CALL = 26 # Relocation for call to __tls_get_addr()
R_386_TLS_GD_POP = 27 # Tag for popl in GD TLS code
R_386_TLS_LDM_32 = 28 # Direct 32 bit for local dynamic thread local data in LE code
R_386_TLS_LDM_PUSH = 29 # Tag for pushl in LDM TLS code
R_386_TLS_LDM_CALL = 30 # Relocation for call to __tls_get_addr() in LDM code
R_386_TLS_LDM_POP = 31 # Tag for popl in LDM TLS code
R_386_TLS_LDO_32 = 32 # Offset relative to TLS block
R_386_TLS_IE_32 = 33 # GOT entry for negated static TLS block offset
R_386_TLS_LE_32 = 34 # Negated offset relative to static TLS block
R_386_TLS_DTPMOD32 = 35 # ID of module containing symbol
R_386_TLS_DTPOFF32 = 36 # Offset in TLS block
R_386_TLS_TPOFF32 = 37 # Negated offset in static TLS block
# 38?
R_386_TLS_GOTDESC = 39 # GOT offset for TLS descriptor.
R_386_TLS_DESC_CALL = 40 # Marker of call through TLS descriptor for relaxation.
R_386_TLS_DESC = 41 # TLS descriptor containing pointer to code and to argument, returning the TLS offset for the symbol.
R_386_IRELATIVE = 42 # Adjust indirectly by program base
# Keep this the last entry.
R_386_NUM = 43
# SUN SPARC relocations
R_SPARC_NONE = 0 # No reloc
R_SPARC_8 = 1 # Direct 8 bit
R_SPARC_16 = 2 # Direct 16 bit
R_SPARC_32 = 3 # Direct 32 bit
R_SPARC_DISP8 = 4 # PC relative 8 bit
R_SPARC_DISP16 = 5 # PC relative 16 bit
R_SPARC_DISP32 = 6 # PC relative 32 bit
R_SPARC_WDISP30 = 7 # PC relative 30 bit shifted
R_SPARC_WDISP22 = 8 # PC relative 22 bit shifted
R_SPARC_HI22 = 9 # High 22 bit
R_SPARC_22 = 10 # Direct 22 bit
R_SPARC_13 = 11 # Direct 13 bit
R_SPARC_LO10 = 12 # Truncated 10 bit
R_SPARC_GOT10 = 13 # Truncated 10 bit GOT entry
R_SPARC_GOT13 = 14 # 13 bit GOT entry
R_SPARC_GOT22 = 15 # 22 bit GOT entry shifted
R_SPARC_PC10 = 16 # PC relative 10 bit truncated
R_SPARC_PC22 = 17 # PC relative 22 bit shifted
R_SPARC_WPLT30 = 18 # 30 bit PC relative PLT address
R_SPARC_COPY = 19 # Copy symbol at runtime
R_SPARC_GLOB_DAT = 20 # Create GOT entry
R_SPARC_JMP_SLOT = 21 # Create PLT entry
R_SPARC_RELATIVE = 22 # Adjust by program base
R_SPARC_UA32 = 23 # Direct 32 bit unaligned
# Additional Sparc64 relocs.
R_SPARC_PLT32 = 24 # Direct 32 bit ref to PLT entry
R_SPARC_HIPLT22 = 25 # High 22 bit PLT entry
R_SPARC_LOPLT10 = 26 # Truncated 10 bit PLT entry
R_SPARC_PCPLT32 = 27 # PC rel 32 bit ref to PLT entry
R_SPARC_PCPLT22 = 28 # PC rel high 22 bit PLT entry
R_SPARC_PCPLT10 = 29 # PC rel trunc 10 bit PLT entry
R_SPARC_10 = 30 # Direct 10 bit
R_SPARC_11 = 31 # Direct 11 bit
R_SPARC_64 = 32 # Direct 64 bit
R_SPARC_OLO10 = 33 # 10bit with secondary 13bit addend
R_SPARC_HH22 = 34 # Top 22 bits of direct 64 bit
R_SPARC_HM10 = 35 # High middle 10 bits of ...
R_SPARC_LM22 = 36 # Low middle 22 bits of ...
R_SPARC_PC_HH22 = 37 # Top 22 bits of pc rel 64 bit
R_SPARC_PC_HM10 = 38 # High middle 10 bit of ...
R_SPARC_PC_LM22 = 39 # Low middle 22 bits of ...
R_SPARC_WDISP16 = 40 # PC relative 16 bit shifted
R_SPARC_WDISP19 = 41 # PC relative 19 bit shifted
R_SPARC_GLOB_JMP = 42 # was part of v9 ABI but was removed
R_SPARC_7 = 43 # Direct 7 bit
R_SPARC_5 = 44 # Direct 5 bit
R_SPARC_6 = 45 # Direct 6 bit
R_SPARC_DISP64 = 46 # PC relative 64 bit
R_SPARC_PLT64 = 47 # Direct 64 bit ref to PLT entry
R_SPARC_HIX22 = 48 # High 22 bit complemented
R_SPARC_LOX10 = 49 # Truncated 11 bit complemented
R_SPARC_H44 = 50 # Direct high 12 of 44 bit
R_SPARC_M44 = 51 # Direct mid 22 of 44 bit
R_SPARC_L44 = 52 # Direct low 10 of 44 bit
R_SPARC_REGISTER = 53 # Global register usage
R_SPARC_UA64 = 54 # Direct 64 bit unaligned
R_SPARC_UA16 = 55 # Direct 16 bit unaligned
R_SPARC_TLS_GD_HI22 = 56
R_SPARC_TLS_GD_LO10 = 57
R_SPARC_TLS_GD_ADD = 58
R_SPARC_TLS_GD_CALL = 59
R_SPARC_TLS_LDM_HI22 = 60
R_SPARC_TLS_LDM_LO10 = 61
R_SPARC_TLS_LDM_ADD = 62
R_SPARC_TLS_LDM_CALL = 63
R_SPARC_TLS_LDO_HIX22 = 64
R_SPARC_TLS_LDO_LOX10 = 65
R_SPARC_TLS_LDO_ADD = 66
R_SPARC_TLS_IE_HI22 = 67
R_SPARC_TLS_IE_LO10 = 68
R_SPARC_TLS_IE_LD = 69
R_SPARC_TLS_IE_LDX = 70
R_SPARC_TLS_IE_ADD = 71
R_SPARC_TLS_LE_HIX22 = 72
R_SPARC_TLS_LE_LOX10 = 73
R_SPARC_TLS_DTPMOD32 = 74
R_SPARC_TLS_DTPMOD64 = 75
R_SPARC_TLS_DTPOFF32 = 76
R_SPARC_TLS_DTPOFF64 = 77
R_SPARC_TLS_TPOFF32 = 78
R_SPARC_TLS_TPOFF64 = 79
R_SPARC_GOTDATA_HIX22 = 80
R_SPARC_GOTDATA_LOX10 = 81
R_SPARC_GOTDATA_OP_HIX22 = 82
R_SPARC_GOTDATA_OP_LOX10 = 83
R_SPARC_GOTDATA_OP = 84
R_SPARC_H34 = 85
R_SPARC_SIZE32 = 86
R_SPARC_SIZE64 = 87
R_SPARC_JMP_IREL = 248
R_SPARC_IRELATIVE = 249
R_SPARC_GNU_VTINHERIT = 250
R_SPARC_GNU_VTENTRY = 251
R_SPARC_REV32 = 252
# Keep this the last entry.
R_SPARC_NUM = 253
# MIPS R3000 relocations
R_MIPS_NONE = 0 # No reloc
R_MIPS_16 = 1 # Direct 16 bit
R_MIPS_32 = 2 # Direct 32 bit
R_MIPS_REL32 = 3 # PC relative 32 bit
R_MIPS_26 = 4 # Direct 26 bit shifted
R_MIPS_HI16 = 5 # High 16 bit
R_MIPS_LO16 = 6 # Low 16 bit
R_MIPS_GPREL16 = 7 # GP relative 16 bit
R_MIPS_LITERAL = 8 # 16 bit literal entry
R_MIPS_GOT16 = 9 # 16 bit GOT entry
R_MIPS_PC16 = 10 # PC relative 16 bit
R_MIPS_CALL16 = 11 # 16 bit GOT entry for function
R_MIPS_GPREL32 = 12 # GP relative 32 bit
R_MIPS_SHIFT5 = 16
R_MIPS_SHIFT6 = 17
R_MIPS_64 = 18
R_MIPS_GOT_DISP = 19
R_MIPS_GOT_PAGE = 20
R_MIPS_GOT_OFST = 21
R_MIPS_GOT_HI16 = 22
R_MIPS_GOT_LO16 = 23
R_MIPS_SUB = 24
R_MIPS_INSERT_A = 25
R_MIPS_INSERT_B = 26
R_MIPS_DELETE = 27
R_MIPS_HIGHER = 28
R_MIPS_HIGHEST = 29
R_MIPS_CALL_HI16 = 30
R_MIPS_CALL_LO16 = 31
R_MIPS_SCN_DISP = 32
R_MIPS_REL16 = 33
R_MIPS_ADD_IMMEDIATE = 34
R_MIPS_PJUMP = 35
R_MIPS_RELGOT = 36
R_MIPS_JALR = 37
R_MIPS_TLS_DTPMOD32 = 38 # Module number 32 bit
R_MIPS_TLS_DTPREL32 = 39 # Module-relative offset 32 bit
R_MIPS_TLS_DTPMOD64 = 40 # Module number 64 bit
R_MIPS_TLS_DTPREL64 = 41 # Module-relative offset 64 bit
R_MIPS_TLS_GD = 42 # 16 bit GOT offset for GD
R_MIPS_TLS_LDM = 43 # 16 bit GOT offset for LDM
R_MIPS_TLS_DTPREL_HI16 = 44 # Module-relative offset, high 16 bits
R_MIPS_TLS_DTPREL_LO16 = 45 # Module-relative offset, low 16 bits
R_MIPS_TLS_GOTTPREL = 46 # 16 bit GOT offset for IE
R_MIPS_TLS_TPREL32 = 47 # TP-relative offset, 32 bit
R_MIPS_TLS_TPREL64 = 48 # TP-relative offset, 64 bit
R_MIPS_TLS_TPREL_HI16 = 49 # TP-relative offset, high 16 bits
R_MIPS_TLS_TPREL_LO16 = 50 # TP-relative offset, low 16 bits
R_MIPS_GLOB_DAT = 51
R_MIPS_COPY = 126
R_MIPS_JUMP_SLOT = 127
# Keep this the last entry.
R_MIPS_NUM = 128
# HPPA relocations
R_PARISC_NONE = 0 # No reloc.
R_PARISC_DIR32 = 1 # Direct 32-bit reference.
R_PARISC_DIR21L = 2 # Left 21 bits of eff. address.
R_PARISC_DIR17R = 3 # Right 17 bits of eff. address.
R_PARISC_DIR17F = 4 # 17 bits of eff. address.
R_PARISC_DIR14R = 6 # Right 14 bits of eff. address.
R_PARISC_PCREL32 = 9 # 32-bit rel. address.
R_PARISC_PCREL21L = 10 # Left 21 bits of rel. address.
R_PARISC_PCREL17R = 11 # Right 17 bits of rel. address.
R_PARISC_PCREL17F = 12 # 17 bits of rel. address.
R_PARISC_PCREL14R = 14 # Right 14 bits of rel. address.
R_PARISC_DPREL21L = 18 # Left 21 bits of rel. address.
R_PARISC_DPREL14R = 22 # Right 14 bits of rel. address.
R_PARISC_GPREL21L = 26 # GP-relative, left 21 bits.
R_PARISC_GPREL14R = 30 # GP-relative, right 14 bits.
R_PARISC_LTOFF21L = 34 # LT-relative, left 21 bits.
R_PARISC_LTOFF14R = 38 # LT-relative, right 14 bits.
R_PARISC_SECREL32 = 41 # 32 bits section rel. address.
R_PARISC_SEGBASE = 48 # No relocation, set segment base.
R_PARISC_SEGREL32 = 49 # 32 bits segment rel. address.
R_PARISC_PLTOFF21L = 50 # PLT rel. address, left 21 bits.
R_PARISC_PLTOFF14R = 54 # PLT rel. address, right 14 bits.
R_PARISC_LTOFF_FPTR32 = 57 # 32 bits LT-rel. function pointer.
R_PARISC_LTOFF_FPTR21L = 58 # LT-rel. fct ptr, left 21 bits.
R_PARISC_LTOFF_FPTR14R = 62 # LT-rel. fct ptr, right 14 bits.
R_PARISC_FPTR64 = 64 # 64 bits function address.
R_PARISC_PLABEL32 = 65 # 32 bits function address.
R_PARISC_PLABEL21L = 66 # Left 21 bits of fdesc address.
R_PARISC_PLABEL14R = 70 # Right 14 bits of fdesc address.
R_PARISC_PCREL64 = 72 # 64 bits PC-rel. address.
R_PARISC_PCREL22F = 74 # 22 bits PC-rel. address.
R_PARISC_PCREL14WR = 75 # PC-rel. address, right 14 bits.
R_PARISC_PCREL14DR = 76 # PC rel. address, right 14 bits.
R_PARISC_PCREL16F = 77 # 16 bits PC-rel. address.
R_PARISC_PCREL16WF = 78 # 16 bits PC-rel. address.
R_PARISC_PCREL16DF = 79 # 16 bits PC-rel. address.
R_PARISC_DIR64 = 80 # 64 bits of eff. address.
R_PARISC_DIR14WR = 83 # 14 bits of eff. address.
R_PARISC_DIR14DR = 84 # 14 bits of eff. address.
R_PARISC_DIR16F = 85 # 16 bits of eff. address.
R_PARISC_DIR16WF = 86 # 16 bits of eff. address.
R_PARISC_DIR16DF = 87 # 16 bits of eff. address.
R_PARISC_GPREL64 = 88 # 64 bits of GP-rel. address.
R_PARISC_GPREL14WR = 91 # GP-rel. address, right 14 bits.
R_PARISC_GPREL14DR = 92 # GP-rel. address, right 14 bits.
R_PARISC_GPREL16F = 93 # 16 bits GP-rel. address.
R_PARISC_GPREL16WF = 94 # 16 bits GP-rel. address.
R_PARISC_GPREL16DF = 95 # 16 bits GP-rel. address.
R_PARISC_LTOFF64 = 96 # 64 bits LT-rel. address.
R_PARISC_LTOFF14WR = 99 # LT-rel. address, right 14 bits.
R_PARISC_LTOFF14DR = 100 # LT-rel. address, right 14 bits.
R_PARISC_LTOFF16F = 101 # 16 bits LT-rel. address.
R_PARISC_LTOFF16WF = 102 # 16 bits LT-rel. address.
R_PARISC_LTOFF16DF = 103 # 16 bits LT-rel. address.
R_PARISC_SECREL64 = 104 # 64 bits section rel. address.
R_PARISC_SEGREL64 = 112 # 64 bits segment rel. address.
R_PARISC_PLTOFF14WR = 115 # PLT-rel. address, right 14 bits.
R_PARISC_PLTOFF14DR = 116 # PLT-rel. address, right 14 bits.
R_PARISC_PLTOFF16F = 117 # 16 bits LT-rel. address.
R_PARISC_PLTOFF16WF = 118 # 16 bits PLT-rel. address.
R_PARISC_PLTOFF16DF = 119 # 16 bits PLT-rel. address.
R_PARISC_LTOFF_FPTR64 = 120 # 64 bits LT-rel. function ptr.
R_PARISC_LTOFF_FPTR14WR = 123 # LT-rel. fct. ptr., right 14 bits.
R_PARISC_LTOFF_FPTR14DR = 124 # LT-rel. fct. ptr., right 14 bits.
R_PARISC_LTOFF_FPTR16F = 125 # 16 bits LT-rel. function ptr.
R_PARISC_LTOFF_FPTR16WF = 126 # 16 bits LT-rel. function ptr.
R_PARISC_LTOFF_FPTR16DF = 127 # 16 bits LT-rel. function ptr.
R_PARISC_LORESERVE = 128
R_PARISC_COPY = 128 # Copy relocation.
R_PARISC_IPLT = 129 # Dynamic reloc, imported PLT
R_PARISC_EPLT = 130 # Dynamic reloc, exported PLT
R_PARISC_TPREL32 = 153 # 32 bits TP-rel. address.
R_PARISC_TPREL21L = 154 # TP-rel. address, left 21 bits.
R_PARISC_TPREL14R = 158 # TP-rel. address, right 14 bits.
R_PARISC_LTOFF_TP21L = 162 # LT-TP-rel. address, left 21 bits.
R_PARISC_LTOFF_TP14R = 166 # LT-TP-rel. address, right 14 bits.
R_PARISC_LTOFF_TP14F = 167 # 14 bits LT-TP-rel. address.
R_PARISC_TPREL64 = 216 # 64 bits TP-rel. address.
R_PARISC_TPREL14WR = 219 # TP-rel. address, right 14 bits.
R_PARISC_TPREL14DR = 220 # TP-rel. address, right 14 bits.
R_PARISC_TPREL16F = 221 # 16 bits TP-rel. address.
R_PARISC_TPREL16WF = 222 # 16 bits TP-rel. address.
R_PARISC_TPREL16DF = 223 # 16 bits TP-rel. address.
R_PARISC_LTOFF_TP64 = 224 # 64 bits LT-TP-rel. address.
R_PARISC_LTOFF_TP14WR = 227 # LT-TP-rel. address, right 14 bits.
R_PARISC_LTOFF_TP14DR = 228 # LT-TP-rel. address, right 14 bits.
R_PARISC_LTOFF_TP16F = 229 # 16 bits LT-TP-rel. address.
R_PARISC_LTOFF_TP16WF = 230 # 16 bits LT-TP-rel. address.
R_PARISC_LTOFF_TP16DF = 231 # 16 bits LT-TP-rel. address.
R_PARISC_GNU_VTENTRY = 232
R_PARISC_GNU_VTINHERIT = 233
R_PARISC_TLS_GD21L = 234 # GD 21-bit left.
R_PARISC_TLS_GD14R = 235 # GD 14-bit right.
R_PARISC_TLS_GDCALL = 236 # GD call to __t_g_a.
R_PARISC_TLS_LDM21L = 237 # LD module 21-bit left.
R_PARISC_TLS_LDM14R = 238 # LD module 14-bit right.
R_PARISC_TLS_LDMCALL = 239 # LD module call to __t_g_a.
R_PARISC_TLS_LDO21L = 240 # LD offset 21-bit left.
R_PARISC_TLS_LDO14R = 241 # LD offset 14-bit right.
R_PARISC_TLS_DTPMOD32 = 242 # DTP module 32-bit.
R_PARISC_TLS_DTPMOD64 = 243 # DTP module 64-bit.
R_PARISC_TLS_DTPOFF32 = 244 # DTP offset 32-bit.
R_PARISC_TLS_DTPOFF64 = 245 # DTP offset 64-bit.
R_PARISC_TLS_LE21L = R_PARISC_TPREL21L
R_PARISC_TLS_LE14R = R_PARISC_TPREL14R
R_PARISC_TLS_IE21L = R_PARISC_LTOFF_TP21L
R_PARISC_TLS_IE14R = R_PARISC_LTOFF_TP14R
R_PARISC_TLS_TPREL32 = R_PARISC_TPREL32
R_PARISC_TLS_TPREL64 = R_PARISC_TPREL64
R_PARISC_HIRESERVE = 255
# Alpha relocations
R_ALPHA_NONE = 0 # No reloc
R_ALPHA_REFLONG = 1 # Direct 32 bit
R_ALPHA_REFQUAD = 2 # Direct 64 bit
R_ALPHA_GPREL32 = 3 # GP relative 32 bit
R_ALPHA_LITERAL = 4 # GP relative 16 bit w/optimization
R_ALPHA_LITUSE = 5 # Optimization hint for LITERAL
R_ALPHA_GPDISP = 6 # Add displacement to GP
R_ALPHA_BRADDR = 7 # PC+4 relative 23 bit shifted
R_ALPHA_HINT = 8 # PC+4 relative 16 bit shifted
R_ALPHA_SREL16 = 9 # PC relative 16 bit
R_ALPHA_SREL32 = 10 # PC relative 32 bit
R_ALPHA_SREL64 = 11 # PC relative 64 bit
R_ALPHA_GPRELHIGH = 17 # GP relative 32 bit, high 16 bits
R_ALPHA_GPRELLOW = 18 # GP relative 32 bit, low 16 bits
R_ALPHA_GPREL16 = 19 # GP relative 16 bit
R_ALPHA_COPY = 24 # Copy symbol at runtime
R_ALPHA_GLOB_DAT = 25 # Create GOT entry
R_ALPHA_JMP_SLOT = 26 # Create PLT entry
R_ALPHA_RELATIVE = 27 # Adjust by program base
R_ALPHA_TLS_GD_HI = 28
R_ALPHA_TLSGD = 29
R_ALPHA_TLS_LDM = 30
R_ALPHA_DTPMOD64 = 31
R_ALPHA_GOTDTPREL = 32
R_ALPHA_DTPREL64 = 33
R_ALPHA_DTPRELHI = 34
R_ALPHA_DTPRELLO = 35
R_ALPHA_DTPREL16 = 36
R_ALPHA_GOTTPREL = 37
R_ALPHA_TPREL64 = 38
R_ALPHA_TPRELHI = 39
R_ALPHA_TPRELLO = 40
R_ALPHA_TPREL16 = 41
# Keep this the last entry.
R_ALPHA_NUM = 46
# PowerPC relocations
R_PPC_NONE = 0
R_PPC_ADDR32 = 1 # 32bit absolute address
R_PPC_ADDR24 = 2 # 26bit address, 2 bits ignored.
R_PPC_ADDR16 = 3 # 16bit absolute address
R_PPC_ADDR16_LO = 4 # lower 16bit of absolute address
R_PPC_ADDR16_HI = 5 # high 16bit of absolute address
R_PPC_ADDR16_HA = 6 # adjusted high 16bit
R_PPC_ADDR14 = 7 # 16bit address, 2 bits ignored
R_PPC_ADDR14_BRTAKEN = 8
R_PPC_ADDR14_BRNTAKEN = 9
R_PPC_REL24 = 10 # PC relative 26 bit
R_PPC_REL14 = 11 # PC relative 16 bit
R_PPC_REL14_BRTAKEN = 12
R_PPC_REL14_BRNTAKEN = 13
R_PPC_GOT16 = 14
R_PPC_GOT16_LO = 15
R_PPC_GOT16_HI = 16
R_PPC_GOT16_HA = 17
R_PPC_PLTREL24 = 18
R_PPC_COPY = 19
R_PPC_GLOB_DAT = 20
R_PPC_JMP_SLOT = 21
R_PPC_RELATIVE = 22
R_PPC_LOCAL24PC = 23
R_PPC_UADDR32 = 24
R_PPC_UADDR16 = 25
R_PPC_REL32 = 26
R_PPC_PLT32 = 27
R_PPC_PLTREL32 = 28
R_PPC_PLT16_LO = 29
R_PPC_PLT16_HI = 30
R_PPC_PLT16_HA = 31
R_PPC_SDAREL16 = 32
R_PPC_SECTOFF = 33
R_PPC_SECTOFF_LO = 34
R_PPC_SECTOFF_HI = 35
R_PPC_SECTOFF_HA = 36
# PowerPC relocations defined for the TLS access ABI.
R_PPC_TLS = 67 # none (sym+add)@tls
R_PPC_DTPMOD32 = 68 # word32 (sym+add)@dtpmod
R_PPC_TPREL16 = 69 # half16* (sym+add)@tprel
R_PPC_TPREL16_LO = 70 # half16 (sym+add)@tprel@l
R_PPC_TPREL16_HI = 71 # half16 (sym+add)@tprel@h
R_PPC_TPREL16_HA = 72 # half16 (sym+add)@tprel@ha
R_PPC_TPREL32 = 73 # word32 (sym+add)@tprel
R_PPC_DTPREL16 = 74 # half16* (sym+add)@dtprel
R_PPC_DTPREL16_LO = 75 # half16 (sym+add)@dtprel@l
R_PPC_DTPREL16_HI = 76 # half16 (sym+add)@dtprel@h
R_PPC_DTPREL16_HA = 77 # half16 (sym+add)@dtprel@ha
R_PPC_DTPREL32 = 78 # word32 (sym+add)@dtprel
R_PPC_GOT_TLSGD16 = 79 # half16* (sym+add)@got@tlsgd
R_PPC_GOT_TLSGD16_LO = 80 # half16 (sym+add)@got@tlsgd@l
R_PPC_GOT_TLSGD16_HI = 81 # half16 (sym+add)@got@tlsgd@h
R_PPC_GOT_TLSGD16_HA = 82 # half16 (sym+add)@got@tlsgd@ha
R_PPC_GOT_TLSLD16 = 83 # half16* (sym+add)@got@tlsld
R_PPC_GOT_TLSLD16_LO = 84 # half16 (sym+add)@got@tlsld@l
R_PPC_GOT_TLSLD16_HI = 85 # half16 (sym+add)@got@tlsld@h
R_PPC_GOT_TLSLD16_HA = 86 # half16 (sym+add)@got@tlsld@ha
R_PPC_GOT_TPREL16 = 87 # half16* (sym+add)@got@tprel
R_PPC_GOT_TPREL16_LO = 88 # half16 (sym+add)@got@tprel@l
R_PPC_GOT_TPREL16_HI = 89 # half16 (sym+add)@got@tprel@h
R_PPC_GOT_TPREL16_HA = 90 # half16 (sym+add)@got@tprel@ha
R_PPC_GOT_DTPREL16 = 91 # half16* (sym+add)@got@dtprel
R_PPC_GOT_DTPREL16_LO = 92 # half16* (sym+add)@got@dtprel@l
R_PPC_GOT_DTPREL16_HI = 93 # half16* (sym+add)@got@dtprel@h
R_PPC_GOT_DTPREL16_HA = 94 # half16* (sym+add)@got@dtprel@ha
# The remaining relocs are from the Embedded ELF ABI, and are not in the SVR4 ELF ABI.
R_PPC_EMB_NADDR32 = 101
R_PPC_EMB_NADDR16 = 102
R_PPC_EMB_NADDR16_LO = 103
R_PPC_EMB_NADDR16_HI = 104
R_PPC_EMB_NADDR16_HA = 105
R_PPC_EMB_SDAI16 = 106
R_PPC_EMB_SDA2I16 = 107
R_PPC_EMB_SDA2REL = 108
R_PPC_EMB_SDA21 = 109 # 16 bit offset in SDA
R_PPC_EMB_MRKREF = 110
R_PPC_EMB_RELSEC16 = 111
R_PPC_EMB_RELST_LO = 112
R_PPC_EMB_RELST_HI = 113
R_PPC_EMB_RELST_HA = 114
R_PPC_EMB_BIT_FLD = 115
R_PPC_EMB_RELSDA = 116 # 16 bit relative offset in SDA
# Diab tool relocations.
R_PPC_DIAB_SDA21_LO = 180 # like EMB_SDA21, but lower 16 bit
R_PPC_DIAB_SDA21_HI = 181 # like EMB_SDA21, but high 16 bit
R_PPC_DIAB_SDA21_HA = 182 # like EMB_SDA21, adjusted high 16
R_PPC_DIAB_RELSDA_LO = 183 # like EMB_RELSDA, but lower 16 bit
R_PPC_DIAB_RELSDA_HI = 184 # like EMB_RELSDA, but high 16 bit
R_PPC_DIAB_RELSDA_HA = 185 # like EMB_RELSDA, adjusted high 16
# GNU extension to support local ifunc.
R_PPC_IRELATIVE = 248
# GNU relocs used in PIC code sequences.
R_PPC_REL16 = 249 # half16 (sym+add-.)
R_PPC_REL16_LO = 250 # half16 (sym+add-.)@l
R_PPC_REL16_HI = 251 # half16 (sym+add-.)@h
R_PPC_REL16_HA = 252 # half16 (sym+add-.)@ha
# This is a phony reloc to handle any old fashioned TOC16 references that may still be in object files.
R_PPC_TOC16 = 255
# PowerPC64 relocations defined by the ABIs
R_PPC64_NONE = R_PPC_NONE
R_PPC64_ADDR32 = R_PPC_ADDR32 # 32bit absolute address
R_PPC64_ADDR24 = R_PPC_ADDR24 # 26bit address, word aligned
R_PPC64_ADDR16 = R_PPC_ADDR16 # 16bit absolute address
R_PPC64_ADDR16_LO = R_PPC_ADDR16_LO # lower 16bits of address
R_PPC64_ADDR16_HI = R_PPC_ADDR16_HI # high 16bits of address.
R_PPC64_ADDR16_HA = R_PPC_ADDR16_HA # adjusted high 16bits.
R_PPC64_ADDR14 = R_PPC_ADDR14 # 16bit address, word aligned
R_PPC64_ADDR14_BRTAKEN = R_PPC_ADDR14_BRTAKEN
R_PPC64_ADDR14_BRNTAKEN = R_PPC_ADDR14_BRNTAKEN
R_PPC64_REL24 = R_PPC_REL24 # PC-rel. 26 bit, word aligned
R_PPC64_REL14 = R_PPC_REL14 # PC relative 16 bit
R_PPC64_REL14_BRTAKEN = R_PPC_REL14_BRTAKEN
R_PPC64_REL14_BRNTAKEN = R_PPC_REL14_BRNTAKEN
R_PPC64_GOT16 = R_PPC_GOT16
R_PPC64_GOT16_LO = R_PPC_GOT16_LO
R_PPC64_GOT16_HI = R_PPC_GOT16_HI
R_PPC64_GOT16_HA = R_PPC_GOT16_HA
R_PPC64_COPY = R_PPC_COPY
R_PPC64_GLOB_DAT = R_PPC_GLOB_DAT
R_PPC64_JMP_SLOT = R_PPC_JMP_SLOT
R_PPC64_RELATIVE = R_PPC_RELATIVE
R_PPC64_UADDR32 = R_PPC_UADDR32
R_PPC64_UADDR16 = R_PPC_UADDR16
R_PPC64_REL32 = R_PPC_REL32
R_PPC64_PLT32 = R_PPC_PLT32
R_PPC64_PLTREL32 = R_PPC_PLTREL32
R_PPC64_PLT16_LO = R_PPC_PLT16_LO
R_PPC64_PLT16_HI = R_PPC_PLT16_HI
R_PPC64_PLT16_HA = R_PPC_PLT16_HA
R_PPC64_SECTOFF = R_PPC_SECTOFF
R_PPC64_SECTOFF_LO = R_PPC_SECTOFF_LO
R_PPC64_SECTOFF_HI = R_PPC_SECTOFF_HI
R_PPC64_SECTOFF_HA = R_PPC_SECTOFF_HA
R_PPC64_ADDR30 = 37 # word30 (S + A - P) >> 2
R_PPC64_ADDR64 = 38 # doubleword64 S + A
R_PPC64_ADDR16_HIGHER = 39 # half16 #higher(S + A)
R_PPC64_ADDR16_HIGHERA = 40 # half16 #highera(S + A)
R_PPC64_ADDR16_HIGHEST = 41 # half16 #highest(S + A)
R_PPC64_ADDR16_HIGHESTA = 42 # half16 #highesta(S + A)
R_PPC64_UADDR64 = 43 # doubleword64 S + A
R_PPC64_REL64 = 44 # doubleword64 S + A - P
R_PPC64_PLT64 = 45 # doubleword64 L + A
R_PPC64_PLTREL64 = 46 # doubleword64 L + A - P
R_PPC64_TOC16 = 47 # half16* S + A - .TOC
R_PPC64_TOC16_LO = 48 # half16 #lo(S + A - .TOC.)
R_PPC64_TOC16_HI = 49 # half16 #hi(S + A - .TOC.)
R_PPC64_TOC16_HA = 50 # half16 #ha(S + A - .TOC.)
R_PPC64_TOC = 51 # doubleword64 .TOC
R_PPC64_PLTGOT16 = 52 # half16* M + A
R_PPC64_PLTGOT16_LO = 53 # half16 #lo(M + A)
R_PPC64_PLTGOT16_HI = 54 # half16 #hi(M + A)
R_PPC64_PLTGOT16_HA = 55 # half16 #ha(M + A)
R_PPC64_ADDR16_DS = 56 # half16ds* (S + A) >> 2
R_PPC64_ADDR16_LO_DS = 57 # half16ds #lo(S + A) >> 2
R_PPC64_GOT16_DS = 58 # half16ds* (G + A) >> 2
R_PPC64_GOT16_LO_DS = 59 # half16ds #lo(G + A) >> 2
R_PPC64_PLT16_LO_DS = 60 # half16ds #lo(L + A) >> 2
R_PPC64_SECTOFF_DS = 61 # half16ds* (R + A) >> 2
R_PPC64_SECTOFF_LO_DS = 62 # half16ds #lo(R + A) >> 2
R_PPC64_TOC16_DS = 63 # half16ds* (S + A - .TOC.) >> 2
R_PPC64_TOC16_LO_DS = 64 # half16ds #lo(S + A - .TOC.) >> 2
R_PPC64_PLTGOT16_DS = 65 # half16ds* (M + A) >> 2
R_PPC64_PLTGOT16_LO_DS = 66 # half16ds #lo(M + A) >> 2
# PowerPC64 relocations defined for the TLS access ABI.
R_PPC64_TLS = 67 # none (sym+add)@tls
R_PPC64_DTPMOD64 = 68 # doubleword64 (sym+add)@dtpmod
R_PPC64_TPREL16 = 69 # half16* (sym+add)@tprel
R_PPC64_TPREL16_LO = 70 # half16 (sym+add)@tprel@l
R_PPC64_TPREL16_HI = 71 # half16 (sym+add)@tprel@h
R_PPC64_TPREL16_HA = 72 # half16 (sym+add)@tprel@ha
R_PPC64_TPREL64 = 73 # doubleword64 (sym+add)@tprel
R_PPC64_DTPREL16 = 74 # half16* (sym+add)@dtprel
R_PPC64_DTPREL16_LO = 75 # half16 (sym+add)@dtprel@l
R_PPC64_DTPREL16_HI = 76 # half16 (sym+add)@dtprel@h
R_PPC64_DTPREL16_HA = 77 # half16 (sym+add)@dtprel@ha
R_PPC64_DTPREL64 = 78 # doubleword64 (sym+add)@dtprel
R_PPC64_GOT_TLSGD16 = 79 # half16* (sym+add)@got@tlsgd
R_PPC64_GOT_TLSGD16_LO = 80 # half16 (sym+add)@got@tlsgd@l
R_PPC64_GOT_TLSGD16_HI = 81 # half16 (sym+add)@got@tlsgd@h
R_PPC64_GOT_TLSGD16_HA = 82 # half16 (sym+add)@got@tlsgd@ha
R_PPC64_GOT_TLSLD16 = 83 # half16* (sym+add)@got@tlsld
R_PPC64_GOT_TLSLD16_LO = 84 # half16 (sym+add)@got@tlsld@l
R_PPC64_GOT_TLSLD16_HI = 85 # half16 (sym+add)@got@tlsld@h
R_PPC64_GOT_TLSLD16_HA = 86 # half16 (sym+add)@got@tlsld@ha
R_PPC64_GOT_TPREL16_DS = 87 # half16ds* (sym+add)@got@tprel
R_PPC64_GOT_TPREL16_LO_DS = 88 # half16ds (sym+add)@got@tprel@l
R_PPC64_GOT_TPREL16_HI = 89 # half16 (sym+add)@got@tprel@h
R_PPC64_GOT_TPREL16_HA = 90 # half16 (sym+add)@got@tprel@ha
R_PPC64_GOT_DTPREL16_DS = 91 # half16ds* (sym+add)@got@dtprel
R_PPC64_GOT_DTPREL16_LO_DS = 92 # half16ds (sym+add)@got@dtprel@l
R_PPC64_GOT_DTPREL16_HI = 93 # half16 (sym+add)@got@dtprel@h
R_PPC64_GOT_DTPREL16_HA = 94 # half16 (sym+add)@got@dtprel@ha
R_PPC64_TPREL16_DS = 95 # half16ds* (sym+add)@tprel
R_PPC64_TPREL16_LO_DS = 96 # half16ds (sym+add)@tprel@l
R_PPC64_TPREL16_HIGHER = 97 # half16 (sym+add)@tprel@higher
R_PPC64_TPREL16_HIGHERA = 98 # half16 (sym+add)@tprel@highera
R_PPC64_TPREL16_HIGHEST = 99 # half16 (sym+add)@tprel@highest
R_PPC64_TPREL16_HIGHESTA = 100 # half16 (sym+add)@tprel@highesta
R_PPC64_DTPREL16_DS = 101 # half16ds* (sym+add)@dtprel
R_PPC64_DTPREL16_LO_DS = 102 # half16ds (sym+add)@dtprel@l
R_PPC64_DTPREL16_HIGHER = 103 # half16 (sym+add)@dtprel@higher
R_PPC64_DTPREL16_HIGHERA = 104 # half16 (sym+add)@dtprel@highera
R_PPC64_DTPREL16_HIGHEST = 105 # half16 (sym+add)@dtprel@highest
R_PPC64_DTPREL16_HIGHESTA = 106 # half16 (sym+add)@dtprel@highesta
# GNU extension to support local ifunc.
R_PPC64_JMP_IREL = 247
R_PPC64_IRELATIVE = 248
R_PPC64_REL16 = 249 # half16 (sym+add-.)
R_PPC64_REL16_LO = 250 # half16 (sym+add-.)@l
R_PPC64_REL16_HI = 251 # half16 (sym+add-.)@h
R_PPC64_REL16_HA = 252 # half16 (sym+add-.)@ha
# PowerPC64 specific values for the Dyn d_tag field.
DT_PPC64_GLINK = (DT_LOPROC + 0)
DT_PPC64_OPD = (DT_LOPROC + 1)
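# Illustrative note (not part of the original constants file): dumps of ELF
# relocation entries usually map the numeric r_type back to one of the symbolic
# names above. A hypothetical reverse lookup, assuming the constants live at
# module level, could look like:
#
#     PPC64_RELOC_NAMES = {value: name for name, value in globals().items()
#                          if name.startswith('R_PPC64_') and isinstance(value, int)}
#     PPC64_RELOC_NAMES.get(38)   # 'R_PPC64_ADDR64'
#
# Note that several R_PPC64_* names alias the same value (the R_PPC_* aliases
# above), so such a mapping keeps whichever name is seen last for those values.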
"""
# Copyright Anne M. Archibald 2008
# Released under the scipy license
"""
import sys
from heapq import heappush, heappop
import math
import numpy as np
import scipy.sparse
__all__ = ['minkowski_distance_p', 'minkowski_distance', 'haversine_distance',
'distance_matrix',
'Rectangle', 'KDTree']
RADIUS_EARTH = 6378.0
HALF_PI = np.pi / 2.0
PI = np.pi
TWO_PI = np.pi * 2.0
def minkowski_distance_p(x, y, p=2):
"""
Compute the p-th power of the L**p distance between two arrays.
For efficiency, this function computes the L**p distance but does
not extract the pth root. If `p` is 1 or infinity, this is equal to
the actual L**p distance.
:param x: (M, K) array_like
Input array.
:param y: (N, K) array_like
Input array.
:param p: float, 1 <= p <= infinity
Which Minkowski p-norm to use.
Examples::
>>> minkowski_distance_p([[0, 0], [0, 0]], [[1, 1], [0, 1]])
array([2, 1])
"""
x = np.asarray(x)
y = np.asarray(y)
if p == np.inf:
return np.amax(np.abs(y - x), axis=-1)
elif p == 1:
return np.sum(np.abs(y - x), axis=-1)
else:
return np.sum(np.abs(y - x) ** p, axis=-1)
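# Illustrative addition (not from the original docstring): for p=1 and p=np.inf
# the value returned here already equals the true distance, e.g.
#
#     >>> minkowski_distance_p([[0, 0]], [[3, 4]], p=1)
#     array([7])
#     >>> minkowski_distance_p([[0, 0]], [[3, 4]], p=np.inf)
#     array([4])
#
# whereas for the default p=2 the result (array([25]) here) is the squared distance.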
def minkowski_distance(x, y, p=2):
"""
Compute the L**p distance between two arrays.
:param x: (M, K) array_like
Input array.
:param y: (N, K) array_like
Input array.
:param p: float, 1 <= p <= infinity
Which Minkowski p-norm to use.
:return:
Examples::
>>> minkowski_distance([[0, 0], [0, 0]], [[1, 1], [0, 1]])
array([ 1.41421356, 1. ])
"""
x = np.asarray(x)
y = np.asarray(y)
if p == np.inf or p == 1:
return minkowski_distance_p(x, y, p)
else:
return minkowski_distance_p(x, y, p) ** (1. / p)
def haversine_distance(x, y):
"""Computes the Haversine distance in kilometres between two points
:param x: first point or array of points, each as array of latitude, longitude in degrees
:param y: second point or array of points, each as array of latitude, longitude in degrees
:return: distance between the two points in kilometres
"""
x = np.asarray(x)
y = np.asarray(y)
return haversine(x, y)
def haversine(x, y):
"""Computes the Haversine distance in kilometres between two points
:param x: first point or points as array, each as array of latitude, longitude in degrees
:param y: second point or points as array, each as array of latitude, longitude in degrees
:return: distance between the two points in kilometres
"""
if x.ndim == 1:
lat1, lon1 = x[0], x[1]
else:
lat1, lon1 = x[:, 0], x[:, 1]
if y.ndim == 1:
lat2, lon2 = y[0], y[1]
else:
lat2, lon2 = y[:, 0], y[:, 1]
lat1 = lat1 * math.pi / 180
lat2 = lat2 * math.pi / 180
lon1 = lon1 * math.pi / 180
lon2 = lon2 * math.pi / 180
arclen = 2 * np.arcsin(np.sqrt((np.sin((lat2 - lat1) / 2)) ** 2 +
np.cos(lat1) * np.cos(lat2) * (np.sin((lon2 - lon1) / 2)) ** 2))
return arclen * RADIUS_EARTH
def haversine_distance_from_radians(x, y):
"""Computes the Haversine distance in kilometres between two points
:param x: first point as array of latitude, longitude in radians
:param y: second point as array of latitude, longitude in radians
:return: distance between the two points in kilometres
"""
lat1, lon1 = x[0], x[1]
lat2, lon2 = y[0], y[1]
arclen = 2 * np.arcsin(np.sqrt((np.sin((lat2 - lat1) / 2)) ** 2 +
np.cos(lat1) * np.cos(lat2) * (np.sin((lon2 - lon1) / 2)) ** 2))
return arclen * RADIUS_EARTH
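# A minimal usage sketch (illustrative only, not part of the original module):
# with the 6378 km Earth radius defined above, two points 90 degrees apart on a
# great circle should come out at roughly a quarter circumference, ~10019 km.
def _example_haversine_usage():
    """Illustrative check of the haversine helpers; not used by the library."""
    equator_point = [0.0, 0.0]   # latitude, longitude in degrees
    north_pole = [90.0, 0.0]
    # Expected to be close to (pi / 2) * RADIUS_EARTH, i.e. about 10019 km.
    return haversine_distance(equator_point, north_pole)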
def geodesic_to_line_of_longitude_crossing_latitude(point, longitude):
"""Given a line of longitude and a point, finds the latitude at which the
geodesic of shortest distance to the line of longitude crosses the line of
longitude.
:param point: point as array of latitude, longitude in radians
:param longitude: longitude in radians
:return: latitude at point of intersection in radians
"""
pt_lat = point[0]
pt_lon = point[1]
# Derived from one of Napier's rules for right angled spherical triangles:
# tan(pt_lat) = tan(latitude) * cos(pt_lon - longitude)
# - behaves better as |pt_lon - longitude| -> pi/2
try:
sin_lat = math.sin(pt_lat) / math.sqrt(1 - math.cos(pt_lat) ** 2 * math.sin(pt_lon - longitude) ** 2)
except ZeroDivisionError:
# pt_lat = +/- pi/2 and pt_lon - longitude = pi/2 + n pi
# - degenerate case - all points on line of longitude equidistant from point.
# Return arbitrary value since the distance to the point does not depend on the
# returned value.
sin_lat = 0.0
try:
latitude = math.asin(sin_lat)
except ValueError:
# Rounding error has made sin_lat slightly > 1.
latitude = math.copysign(HALF_PI, sin_lat)
return latitude
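# Illustrative sketch (not part of the original module): two consequences of the
# formula above that are easy to verify by hand are that a point already on the
# line of longitude crosses at its own latitude, and a point on the equator
# always crosses at the equator.
def _example_crossing_latitude():
    """Illustrative check of geodesic_to_line_of_longitude_crossing_latitude."""
    # Point at 40N 10E against the 10E meridian: crossing latitude is 40N.
    on_the_meridian = geodesic_to_line_of_longitude_crossing_latitude(
        [math.radians(40.0), math.radians(10.0)], math.radians(10.0))
    # Point on the equator at 30E against the 10E meridian: crossing latitude is 0.
    on_the_equator = geodesic_to_line_of_longitude_crossing_latitude(
        [0.0, math.radians(30.0)], math.radians(10.0))
    return on_the_meridian, on_the_equator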
def line_of_longitude_segment_nearer_end_latitude(edge_lat_min, edge_lat_max, edge_lon, pt_lat, pt_lon):
"""Given a segment of a line of longitude and a point, determines the end
of the segment nearer to the point.
:param edge_lat_min: lower latitude on line of longitude segment in radians
:param edge_lat_max: upper latitude on line of longitude segment in radians
:param edge_lon: longitude of segment in radians
:param pt_lat: latitude of point in radians
:param pt_lon: longitude of point in radians
:return: latitude of nearer segment endpoint in radians
"""
# Determine which side of a great circle perpendicular to the mid-point of
# the edge that the point lies on.
lat_mid = (edge_lat_min + edge_lat_max) / 2.0
# One of Napier's rules for right angled spherical triangles:
tan_lat_equi = math.tan(lat_mid) * math.cos(pt_lon - edge_lon)
return edge_lat_min if math.tan(pt_lat) < tan_lat_equi else edge_lat_max
class RectangleBase(object):
"""Hyperrectangle class.
Represents a Cartesian product of intervals.
"""
def __init__(self, maxes, mins):
"""Construct a hyperrectangle."""
self.maxes = np.maximum(maxes, mins).astype(np.float)
self.mins = np.minimum(maxes, mins).astype(np.float)
self.m, = self.maxes.shape
def __repr__(self):
return "<Rectangle %s>" % list(zip(self.mins, self.maxes))
def volume(self):
"""Total volume."""
return np.prod(self.maxes - self.mins)
def split(self, d, split):
"""
Produce two hyperrectangles by splitting.
In general, if you need to compute maximum and minimum
distances to the children, it can be done more efficiently
by updating the maximum and minimum distances to the parent.
:param d: int
Axis to split hyperrectangle along.
:param split:
Input.
"""
mid = np.copy(self.maxes)
mid[d] = split
less = self.__class__(self.mins, mid)
mid = np.copy(self.mins)
mid[d] = split
greater = self.__class__(mid, self.maxes)
return less, greater
class Rectangle(RectangleBase):
"""Rectangle using Euclidean metric.
"""
def min_distance_point(self, x, p=2.):
"""
Return the minimum distance between input and points in the hyperrectangle.
:param x: array_like
Input.
:param p: float, optional
Input.
"""
# a = self.mins - x
# b = x - self.maxes
# c = np.maximum(a, b)
# d = np.maximum(0, c)
return minkowski_distance(0, np.maximum(0, np.maximum(self.mins - x, x - self.maxes)), p)
def max_distance_point(self, x, p=2.):
"""
Return the maximum distance between input and points in the hyperrectangle.
:param x: array_like
Input array.
:param p: float, optional
Input.
"""
return minkowski_distance(0, np.maximum(self.maxes - x, x - self.mins), p)
def min_distance_rectangle(self, other, p=2.):
"""
Compute the minimum distance between points in the two hyperrectangles.
:param other: hyperrectangle
Input.
:param p: float
Input.
"""
return minkowski_distance(0, np.maximum(0, np.maximum(self.mins - other.maxes, other.mins - self.maxes)), p)
def max_distance_rectangle(self, other, p=2.):
"""
Compute the maximum distance between points in the two hyperrectangles.
:param other: hyperrectangle
Input.
:param p: float, optional
Input.
"""
return minkowski_distance(0, np.maximum(self.maxes - other.mins, other.maxes - self.mins), p)
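# A short illustrative sketch (not part of the original module) of how the
# Euclidean Rectangle is used by the tree code below: distances to points
# outside and inside the box, and the effect of split().
def _example_rectangle_usage():
    """Illustrative check of Rectangle; not used by the library."""
    rect = Rectangle(np.array([1.0, 1.0]), np.array([0.0, 0.0]))  # unit square
    near = rect.min_distance_point(np.array([2.0, 0.5]))    # 1.0, to the x == 1 face
    far = rect.max_distance_point(np.array([2.0, 0.5]))     # sqrt(4.25), to a corner at x == 0
    inside = rect.min_distance_point(np.array([0.5, 0.5]))  # 0.0, the point is inside
    less, greater = rect.split(0, 0.25)  # two boxes sharing the plane x == 0.25
    return near, far, inside, less, greater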
class RectangleHaversine(RectangleBase):
"""Rectangular region in latitude/longitude coordinates on a sphere.
"""
def min_distance_point(self, x, p=2.):
"""Find the distance to the point in the rectangle closest to x.
:param x: point as array of latitude, longitude in degrees
:param p: unused
:return: distance in kilometres
"""
return self._min_distance_point(np.radians(x))
def max_distance_point(self, x, p=2.):
"""Find the distance to the point in the rectangle furthest from x.
This is the semi-circumference minus the distance to the point closest
to the polar opposite point.
:param x: point as array of latitude, longitude in degrees
:param p: unused
:return: distance in kilometres
"""
point = np.radians(x)
opp_pt = [-point[0], point[1] + PI]
opp_dist = self._min_distance_point(opp_pt)
return PI * RADIUS_EARTH - opp_dist
def _min_distance_point(self, point):
"""Find the distance to the point in the rectangle closest to x.
:param point: point as array of latitude, longitude in radians
:return: distance in kilometres
"""
rect_lat_min, rect_lon_min = np.radians(self.mins)
rect_lat_max, rect_lon_max = np.radians(self.maxes)
lat0 = point[0]
# Shift point longitude to be within pi of rectangle mid-longitude.
range_start = (rect_lon_min + rect_lon_max) / 2.0 - PI
lon0 = np.fmod(point[1] - range_start, TWO_PI) + range_start
inside = False
if rect_lon_min <= lon0 <= rect_lon_max:
# Within longitude range of rectangle - geodesic is line of longitude.
# Could simplify distance calculation.
lon1 = lon0
if lat0 < rect_lat_min:
lat1 = rect_lat_min
elif lat0 > rect_lat_max:
lat1 = rect_lat_max
else:
inside = True
# print("Inside")
else:
# Determine which side of the rectangle the point is nearer to allowing for longitude circularity.
lon_mid = (rect_lon_min + rect_lon_max) / 2.0
if (lon0 < lon_mid and lon_mid - lon0 < PI) or (lon0 > lon_mid and lon0 - lon_mid > PI):
# Point is nearest to left side of rectangle.
lon1 = rect_lon_min
lon_diff = rect_lon_min - lon0
if lon_diff < 0.0:
lon_diff += TWO_PI
if lon_diff > HALF_PI:
# Nearest point cannot be on rectangle edge - must be a vertex.
lat1 = line_of_longitude_segment_nearer_end_latitude(rect_lat_min, rect_lat_max, lon1, lat0, lon0)
else:
# To left of rectangle
lat1 = geodesic_to_line_of_longitude_crossing_latitude(point, rect_lon_min)
lat1 = np.minimum(np.maximum(lat1, rect_lat_min), rect_lat_max)
else:
# Point is nearest to right side of rectangle.
lon1 = rect_lon_max
lon_diff = lon0 - rect_lon_max
if lon_diff < 0.0:
lon_diff += TWO_PI
if lon_diff > HALF_PI:
# Nearest point cannot be on rectangle edge - must be a vertex.
lat1 = line_of_longitude_segment_nearer_end_latitude(rect_lat_min, rect_lat_max, lon1, lat0, lon0)
else:
lat1 = geodesic_to_line_of_longitude_crossing_latitude(point, rect_lon_max)
lat1 = np.minimum(np.maximum(lat1, rect_lat_min), rect_lat_max)
if inside:
dist = 0.0
else:
# print("Nearest point:", [np.degrees(lat1), np.degrees(lon1)])
dist = haversine_distance_from_radians([lat0, lon0], [lat1, lon1])
return dist
def min_distance_point_approx(self, x, p=2.):
"""Return the minimum distance between input and points in the hyperrectangle.
Approximate implementation determining the point to which to measure as if
in Euclidean space.
:param x: array_like
Input.
:param p: float, optional
Input.
"""
closest_point = np.minimum(np.maximum(x, self.mins), self.maxes)
return haversine_distance(x, closest_point)
def max_distance_point_approx(self, x, p=2.):
"""
Return the maximum distance between input and points in the hyperrectangle.
Approximate implementation determining the point to which to measure as if
in Euclidean space.
Parameters
----------
:param x: array_like
Input.
:param p: float, optional
Input.
"""
furthest_point = np.where(self.maxes - x > x - self.mins, self.maxes, self.mins)
return haversine_distance(x, furthest_point)
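# Illustrative sketch (not part of the original module): RectangleHaversine keeps
# its bounds in degrees and reports distances in kilometres along the sphere.
def _example_rectangle_haversine_usage():
    """Illustrative check of RectangleHaversine; not used by the library."""
    # A box spanning 10S-10N and 0E-20E.
    rect = RectangleHaversine(np.array([10.0, 20.0]), np.array([-10.0, 0.0]))
    inside = rect.min_distance_point(np.array([0.0, 5.0]))    # 0.0, the point is in the box
    outside = rect.min_distance_point(np.array([0.0, 30.0]))  # ~1113 km, 10 degrees along the equator
    return inside, outside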
class KDTree(object):
"""
kd-tree for quick nearest-neighbor lookup
This class provides an index into a set of k-dimensional points which
can be used to rapidly look up the nearest neighbors of any point.
:param data: (N,K) array_like
The data points to be indexed. This array is not copied, and
so modifying this data will result in bogus results.
:param leafsize: int, optional
The number of points at which the algorithm switches over to
brute-force. Has to be positive. In the case of points which have
the same position this is not the minimum
:raises:
RuntimeError
The maximum recursion limit can be exceeded for large data
sets. If this happens, either increase the value for the `leafsize`
parameter or increase the recursion limit by::
>>> import sys
>>> sys.setrecursionlimit(10000)
Notes
The algorithm used is described in Maneewongvatana and Mount 1999.
The general idea is that the kd-tree is a binary tree, each of whose
nodes represents an axis-aligned hyperrectangle. Each node specifies
an axis and splits the set of points based on whether their coordinate
along that axis is greater than or less than a particular value.
During construction, the axis and splitting point are chosen by the
"sliding midpoint" rule, which ensures that the cells do not all
become long and thin.
The tree can be queried for the r closest neighbors of any given point
(optionally returning only those within some maximum distance of the
point). It can also be queried, with a substantial gain in efficiency,
for the r approximate closest neighbors.
For large dimensions (20 is already large) do not expect this to run
significantly faster than brute force. High-dimensional nearest-neighbor
queries are a substantial open problem in computer science.
The tree also supports all-neighbors queries, both with arrays of points
and with other kd-trees. These do use a reasonably efficient algorithm,
but the kd-tree is not necessarily the best data structure for this
sort of calculation.
"""
def __init__(self, data, leafsize=10):
self.data = np.asarray(data)
self.n, self.m = np.shape(self.data)
self.leafsize = int(leafsize)
if self.leafsize < 1:
raise ValueError("leafsize must be at least 1")
self.maxes = np.amax(self.data, axis=0)
self.mins = np.amin(self.data, axis=0)
self.tree = self._build(np.arange(self.n), self.maxes, self.mins)
class node(object):
if sys.version_info[0] >= 3:
            def __lt__(self, other): return id(self) < id(other)
            def __gt__(self, other): return id(self) > id(other)
            def __le__(self, other): return id(self) <= id(other)
            def __ge__(self, other): return id(self) >= id(other)
            def __eq__(self, other): return id(self) == id(other)
class leafnode(node):
def __init__(self, idx):
self.idx = idx
self.children = len(idx)
class innernode(node):
def __init__(self, split_dim, split, less, greater):
self.split_dim = split_dim
self.split = split
self.less = less
self.greater = greater
self.children = less.children + greater.children
def _build(self, idx, maxes, mins):
"""
build the tree
:param idx: the data indexes which are part of this node
:param maxes: the maximum value of each dimension for this node
:param mins: the minimum value of each dimension for this node
:return: the node
"""
if len(idx) <= self.leafsize:
return KDTree.leafnode(idx)
else:
data = self.data[idx]
# I suspect these are commented out because calculating the max takes a long time and for reasonably
# distributed data does not speed up the algorithm
# maxes = np.amax(data,axis=0)
# mins = np.amin(data,axis=0)
            # Find the dimension with the biggest spread (e.g. is it lat or lon?)
d = np.argmax(maxes - mins)
maxval = maxes[d]
minval = mins[d]
            # the mins and maxes are not recalculated, but the relevant bound is set to the split value when
            # building each child, so unless the split lands exactly on a border this will never trigger
if maxval == minval:
# all points are identical; warn user?
return KDTree.leafnode(idx)
            data = data[:, d]  # the coordinate values along the split dimension
# sliding midpoint rule; see Maneewongvatana and Mount 1999
# for arguments that this is a good idea. (I am not sure this is implemented correctly
# because we are not sliding the min or max)
split = (maxval + minval) / 2
less_idx = np.nonzero(data <= split)[0]
greater_idx = np.nonzero(data > split)[0]
if len(less_idx) == 0:
split = np.amin(data)
less_idx = np.nonzero(data <= split)[0]
greater_idx = np.nonzero(data > split)[0]
if len(greater_idx) == 0:
split = np.amax(data)
less_idx = np.nonzero(data < split)[0]
greater_idx = np.nonzero(data >= split)[0]
if len(less_idx) == 0:
                # _still_ zero? then all points must share the same value; check that and put them all in one leaf node
if not np.all(data == data[0]):
raise ValueError("Troublesome data array: %s" % data)
return KDTree.leafnode(idx)
lessmaxes = np.copy(maxes)
lessmaxes[d] = split
greatermins = np.copy(mins)
greatermins[d] = split
return KDTree.innernode(
d,
split,
self._build(idx[less_idx], lessmaxes, mins),
self._build(idx[greater_idx], maxes, greatermins))
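    # Illustrative note (added commentary, not from the original source): the
    # "slide" above matters because maxes/mins are inherited from the parent cell
    # rather than recomputed from the points. For example, if this node's bounds
    # along d are [0, 10] but the points in it are [1, 2], the plain midpoint 5
    # puts everything on the "less" side; the split then slides to
    # np.amax(data) == 2, giving less == [1] and greater == [2] instead of an
    # empty child.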
def _query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):
side_distances = np.maximum(0, np.maximum(x - self.maxes, self.mins - x))
if p != np.inf:
side_distances **= p
min_distance = np.sum(side_distances)
else:
min_distance = np.amax(side_distances)
# priority queue for chasing nodes
# entries are:
# minimum distance between the cell and the target
# distances between the nearest side of the cell and the target
# the head node of the cell
q = [(min_distance,
tuple(side_distances),
self.tree)]
# priority queue for the nearest neighbors
# furthest known neighbor first
# entries are (-distance**p, i)
neighbors = []
if eps == 0:
epsfac = 1
elif p == np.inf:
epsfac = 1 / (1 + eps)
else:
epsfac = 1 / (1 + eps) ** p
if p != np.inf and distance_upper_bound != np.inf:
distance_upper_bound = distance_upper_bound ** p
while q:
min_distance, side_distances, node = heappop(q)
if isinstance(node, KDTree.leafnode):
# brute-force
data = self.data[node.idx]
ds = minkowski_distance_p(data, x[np.newaxis, :], p)
for i in range(len(ds)):
if ds[i] < distance_upper_bound:
if len(neighbors) == k:
heappop(neighbors)
heappush(neighbors, (-ds[i], node.idx[i]))
if len(neighbors) == k:
distance_upper_bound = -neighbors[0][0]
else:
# we don't push cells that are too far onto the queue at all,
# but since the distance_upper_bound decreases, we might get
# here even if the cell's too far
if min_distance > distance_upper_bound * epsfac:
# since this is the nearest cell, we're done, bail out
break
# compute minimum distances to the children and push them on
if x[node.split_dim] < node.split:
near, far = node.less, node.greater
else:
near, far = node.greater, node.less
# near child is at the same distance as the current node
heappush(q, (min_distance, side_distances, near))
# far child is further by an amount depending only
# on the split value
sd = list(side_distances)
if p == np.inf:
min_distance = max(min_distance, abs(node.split - x[node.split_dim]))
elif p == 1:
sd[node.split_dim] = np.abs(node.split - x[node.split_dim])
min_distance = min_distance - side_distances[node.split_dim] + sd[node.split_dim]
else:
sd[node.split_dim] = np.abs(node.split - x[node.split_dim]) ** p
min_distance = min_distance - side_distances[node.split_dim] + sd[node.split_dim]
# far child might be too far, if so, don't bother pushing it
if min_distance <= distance_upper_bound * epsfac:
heappush(q, (min_distance, tuple(sd), far))
if p == np.inf:
return sorted([(-d, i) for (d, i) in neighbors])
else:
return sorted([((-d) ** (1. / p), i) for (d, i) in neighbors])
def query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):
"""
Query the kd-tree for nearest neighbors
:param x: array_like, last dimension self.m
An array of points to query.
:param k: integer
The number of nearest neighbors to return.
:param eps: nonnegative float
Return approximate nearest neighbors; the kth returned value
is guaranteed to be no further than (1+eps) times the
distance to the real kth nearest neighbor.
:param p: float, 1<=p<=infinity
Which Minkowski p-norm to use.
1 is the sum-of-absolute-values "Manhattan" distance
2 is the usual Euclidean distance
infinity is the maximum-coordinate-difference distance
:param distance_upper_bound: nonnegative float
Return only neighbors within this distance. This is used to prune
tree searches, so if you are doing a series of nearest-neighbor
queries, it may help to supply the distance to the nearest neighbor
of the most recent point.
        :returns:
d : array of floats
The distances to the nearest neighbors.
If x has shape tuple+(self.m,), then d has shape tuple if
k is one, or tuple+(k,) if k is larger than one. Missing
neighbors are indicated with infinite distances. If k is None,
then d is an object array of shape tuple, containing lists
of distances. In either case the hits are sorted by distance
(nearest first).
i : array of integers
The locations of the neighbors in self.data. i is the same
shape as d.
Examples
>>> from scipy import spatial
>>> x, y = np.mgrid[0:5, 2:8]
>>> tree = spatial.KDTree(zip(x.ravel(), y.ravel()))
>>> tree.data
array([[0, 2],
[0, 3],
[0, 4],
[0, 5],
[0, 6],
[0, 7],
[1, 2],
[1, 3],
[1, 4],
[1, 5],
[1, 6],
[1, 7],
[2, 2],
[2, 3],
[2, 4],
[2, 5],
[2, 6],
[2, 7],
[3, 2],
[3, 3],
[3, 4],
[3, 5],
[3, 6],
[3, 7],
[4, 2],
[4, 3],
[4, 4],
[4, 5],
[4, 6],
[4, 7]])
>>> pts = np.array([[0, 0], [2.1, 2.9]])
>>> tree.query(pts)
(array([ 2. , 0.14142136]), array([ 0, 13]))
"""
x = np.asarray(x)
if np.shape(x)[-1] != self.m:
raise ValueError("x must consist of vectors of length %d but has shape %s" % (self.m, np.shape(x)))
if p < 1:
raise ValueError("Only p-norms with 1<=p<=infinity permitted")
retshape = np.shape(x)[:-1]
if retshape != ():
if k is None:
dd = np.empty(retshape, dtype=np.object)
ii = np.empty(retshape, dtype=np.object)
elif k > 1:
dd = np.empty(retshape + (k,), dtype=np.float)
dd.fill(np.inf)
ii = np.empty(retshape + (k,), dtype=np.int)
ii.fill(self.n)
elif k == 1:
dd = np.empty(retshape, dtype=np.float)
dd.fill(np.inf)
ii = np.empty(retshape, dtype=np.int)
ii.fill(self.n)
else:
raise ValueError("Requested %s nearest neighbors; acceptable numbers are integers greater than or equal"
" to one, or None")
for c in np.ndindex(retshape):
hits = self._query(x[c], k=k, eps=eps, p=p, distance_upper_bound=distance_upper_bound)
if k is None:
dd[c] = [d for (d, i) in hits]
ii[c] = [i for (d, i) in hits]
elif k > 1:
for j in range(len(hits)):
dd[c + (j,)], ii[c + (j,)] = hits[j]
elif k == 1:
if len(hits) > 0:
dd[c], ii[c] = hits[0]
else:
dd[c] = np.inf
ii[c] = self.n
return dd, ii
else:
hits = self._query(x, k=k, eps=eps, p=p, distance_upper_bound=distance_upper_bound)
if k is None:
return [d for (d, i) in hits], [i for (d, i) in hits]
elif k == 1:
if len(hits) > 0:
return hits[0]
else:
return np.inf, self.n
elif k > 1:
dd = np.empty(k, dtype=np.float)
dd.fill(np.inf)
ii = np.empty(k, dtype=np.int)
ii.fill(self.n)
for j in range(len(hits)):
dd[j], ii[j] = hits[j]
return dd, ii
else:
raise ValueError("Requested %s nearest neighbors; acceptable numbers are integers greater than or "
"equal to one, or None")
def _query_ball_point(self, x, r, p=2., eps=0):
R = Rectangle(self.maxes, self.mins)
def traverse_checking(node, rect):
if rect.min_distance_point(x, p) > r / (1. + eps):
return []
elif rect.max_distance_point(x, p) < r * (1. + eps):
return traverse_no_checking(node)
elif isinstance(node, KDTree.leafnode):
d = self.data[node.idx]
return node.idx[minkowski_distance(d, x, p) <= r].tolist()
else:
less, greater = rect.split(node.split_dim, node.split)
return traverse_checking(node.less, less) + traverse_checking(node.greater, greater)
def traverse_no_checking(node):
if isinstance(node, KDTree.leafnode):
return node.idx.tolist()
else:
return traverse_no_checking(node.less) + \
traverse_no_checking(node.greater)
return traverse_checking(self.tree, R)
def query_ball_point(self, x, r, p=2., eps=0):
"""Find all points within distance r of point(s) x.
:param x: array_like, shape tuple + (self.m,)
The point or points to search for neighbors of.
:param r: positive float
The radius of points to return.
:param p: float, optional
Which Minkowski p-norm to use. Should be in the range [1, inf].
:param eps: nonnegative float, optional
Approximate search. Branches of the tree are not explored if their
nearest points are further than ``r / (1 + eps)``, and branches are
added in bulk if their furthest points are nearer than
``r * (1 + eps)``.
:returns: list or array of lists
If `x` is a single point, returns a list of the indices of the
neighbors of `x`. If `x` is an array of points, returns an object
array of shape tuple containing lists of neighbors.
**Notes**
If you have many points whose neighbors you want to find, you may save
substantial amounts of time by putting them in a KDTree and using
query_ball_tree.
**Examples**
>>> from scipy import spatial
>>> x, y = np.mgrid[0:4, 0:4]
>>> points = zip(x.ravel(), y.ravel())
>>> tree = spatial.KDTree(points)
>>> tree.query_ball_point([2, 0], 1)
[4, 8, 9, 12]
"""
x = np.asarray(x)
if x.shape[-1] != self.m:
raise ValueError("Searching for a %d-dimensional point in a "
"%d-dimensional KDTree" % (x.shape[-1], self.m))
if len(x.shape) == 1:
return self._query_ball_point(x, r, p, eps)
else:
retshape = x.shape[:-1]
result = np.empty(retshape, dtype=np.object)
for c in np.ndindex(retshape):
result[c] = self._query_ball_point(x[c], r, p=p, eps=eps)
return result
def query_ball_tree(self, other, r, p=2., eps=0):
"""Find all pairs of points whose distance is at most r
:param other: KDTree instance
The tree containing points to search against.
:param r: float
The maximum distance, has to be positive.
:param p: float, optional
Which Minkowski norm to use. `p` has to meet the condition
``1 <= p <= infinity``.
:param eps: float, optional
Approximate search. Branches of the tree are not explored
if their nearest points are further than ``r/(1+eps)``, and
branches are added in bulk if their furthest points are nearer
than ``r * (1+eps)``. `eps` has to be non-negative.
:returns: list of lists
For each element ``self.data[i]`` of this tree, ``results[i]`` is a
list of the indices of its neighbors in ``other.data``.
"""
results = [[] for i in range(self.n)]
def traverse_checking(node1, rect1, node2, rect2):
if rect1.min_distance_rectangle(rect2, p) > r / (1. + eps):
return
elif rect1.max_distance_rectangle(rect2, p) < r * (1. + eps):
traverse_no_checking(node1, node2)
elif isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
d = other.data[node2.idx]
for i in node1.idx:
results[i] += node2.idx[minkowski_distance(d, self.data[i], p) <= r].tolist()
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1, rect1, node2.less, less)
traverse_checking(node1, rect1, node2.greater, greater)
elif isinstance(node2, KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse_checking(node1.less, less, node2, rect2)
traverse_checking(node1.greater, greater, node2, rect2)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1.less, less1, node2.less, less2)
traverse_checking(node1.less, less1, node2.greater, greater2)
traverse_checking(node1.greater, greater1, node2.less, less2)
traverse_checking(node1.greater, greater1, node2.greater, greater2)
def traverse_no_checking(node1, node2):
if isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
for i in node1.idx:
results[i] += node2.idx.tolist()
else:
traverse_no_checking(node1, node2.less)
traverse_no_checking(node1, node2.greater)
else:
traverse_no_checking(node1.less, node2)
traverse_no_checking(node1.greater, node2)
traverse_checking(self.tree, Rectangle(self.maxes, self.mins),
other.tree, Rectangle(other.maxes, other.mins))
return results
def query_pairs(self, r, p=2., eps=0):
"""
Find all pairs of points within a distance.
:param r: positive float
The maximum distance.
:param p: float, optional
Which Minkowski norm to use. `p` has to meet the condition
``1 <= p <= infinity``.
:param eps: float, optional
Approximate search. Branches of the tree are not explored
if their nearest points are further than ``r/(1+eps)``, and
branches are added in bulk if their furthest points are nearer
than ``r * (1+eps)``. `eps` has to be non-negative.
:returns: set
Set of pairs ``(i,j)``, with ``i < j``, for which the corresponding
positions are close.
"""
results = set()
def traverse_checking(node1, rect1, node2, rect2):
if rect1.min_distance_rectangle(rect2, p) > r / (1. + eps):
return
elif rect1.max_distance_rectangle(rect2, p) < r * (1. + eps):
traverse_no_checking(node1, node2)
elif isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
# Special care to avoid duplicate pairs
if id(node1) == id(node2):
d = self.data[node2.idx]
for i in node1.idx:
for j in node2.idx[minkowski_distance(d, self.data[i], p) <= r]:
if i < j:
results.add((i, j))
else:
d = self.data[node2.idx]
for i in node1.idx:
for j in node2.idx[minkowski_distance(d, self.data[i], p) <= r]:
if i < j:
results.add((i, j))
elif j < i:
results.add((j, i))
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1, rect1, node2.less, less)
traverse_checking(node1, rect1, node2.greater, greater)
elif isinstance(node2, KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse_checking(node1.less, less, node2, rect2)
traverse_checking(node1.greater, greater, node2, rect2)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1.less, less1, node2.less, less2)
traverse_checking(node1.less, less1, node2.greater, greater2)
# Avoid traversing (node1.less, node2.greater) and
# (node1.greater, node2.less) (it's the same node pair twice
# over, which is the source of the complication in the
# original KDTree.query_pairs)
if id(node1) != id(node2):
traverse_checking(node1.greater, greater1, node2.less, less2)
traverse_checking(node1.greater, greater1, node2.greater, greater2)
def traverse_no_checking(node1, node2):
if isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
# Special care to avoid duplicate pairs
if id(node1) == id(node2):
for i in node1.idx:
for j in node2.idx:
if i < j:
results.add((i, j))
else:
for i in node1.idx:
for j in node2.idx:
if i < j:
results.add((i, j))
elif j < i:
results.add((j, i))
else:
traverse_no_checking(node1, node2.less)
traverse_no_checking(node1, node2.greater)
else:
# Avoid traversing (node1.less, node2.greater) and
# (node1.greater, node2.less) (it's the same node pair twice
# over, which is the source of the complication in the
# original KDTree.query_pairs)
if id(node1) == id(node2):
traverse_no_checking(node1.less, node2.less)
traverse_no_checking(node1.less, node2.greater)
traverse_no_checking(node1.greater, node2.greater)
else:
traverse_no_checking(node1.less, node2)
traverse_no_checking(node1.greater, node2)
traverse_checking(self.tree, Rectangle(self.maxes, self.mins),
self.tree, Rectangle(self.maxes, self.mins))
return results
def count_neighbors(self, other, r, p=2.):
"""
Count how many nearby pairs can be formed.
Count the number of pairs (x1,x2) can be formed, with x1 drawn
from self and x2 drawn from `other`, and where
``distance(x1, x2, p) <= r``.
This is the "two-point correlation" described in Gray and Moore 2000,
"N-body problems in statistical learning", and the code here is based
on their algorithm.
:param other: KDTree instance
The other tree to draw points from.
:param r: float or one-dimensional array of floats
The radius to produce a count for. Multiple radii are searched with
a single tree traversal.
:param p: float, 1<=p<=infinity
Which Minkowski p-norm to use
:returns: int or 1-D array of ints
The number of pairs. Note that this is internally stored in a numpy
int, and so may overflow if very large (2e9).
"""
def traverse(node1, rect1, node2, rect2, idx):
min_r = rect1.min_distance_rectangle(rect2, p)
max_r = rect1.max_distance_rectangle(rect2, p)
c_greater = r[idx] > max_r
result[idx[c_greater]] += node1.children * node2.children
idx = idx[(min_r <= r[idx]) & (r[idx] <= max_r)]
if len(idx) == 0:
return
if isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
ds = minkowski_distance(self.data[node1.idx][:, np.newaxis, :],
other.data[node2.idx][np.newaxis, :, :],
p).ravel()
ds.sort()
result[idx] += np.searchsorted(ds, r[idx], side='right')
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse(node1, rect1, node2.less, less, idx)
traverse(node1, rect1, node2.greater, greater, idx)
else:
if isinstance(node2, KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse(node1.less, less, node2, rect2, idx)
traverse(node1.greater, greater, node2, rect2, idx)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse(node1.less, less1, node2.less, less2, idx)
traverse(node1.less, less1, node2.greater, greater2, idx)
traverse(node1.greater, greater1, node2.less, less2, idx)
traverse(node1.greater, greater1, node2.greater, greater2, idx)
R1 = Rectangle(self.maxes, self.mins)
R2 = Rectangle(other.maxes, other.mins)
if np.shape(r) == ():
r = np.array([r])
result = np.zeros(1, dtype=int)
traverse(self.tree, R1, other.tree, R2, np.arange(1))
return result[0]
elif len(np.shape(r)) == 1:
r = np.asarray(r)
n, = r.shape
result = np.zeros(n, dtype=int)
traverse(self.tree, R1, other.tree, R2, np.arange(n))
return result
else:
raise ValueError("r must be either a single value or a one-dimensional array of values")
def sparse_distance_matrix(self, other, max_distance, p=2.):
"""
Compute a sparse distance matrix
Computes a distance matrix between two KDTrees, leaving as zero
any distance greater than max_distance.
:param other: KDTree
:param max_distance: positive float
:param p: float, optional
:returns: dok_matrix
Sparse matrix representing the results in "dictionary of keys" format.
"""
result = scipy.sparse.dok_matrix((self.n, other.n))
def traverse(node1, rect1, node2, rect2):
if rect1.min_distance_rectangle(rect2, p) > max_distance:
return
elif isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
for i in node1.idx:
for j in node2.idx:
d = minkowski_distance(self.data[i], other.data[j], p)
if d <= max_distance:
result[i, j] = d
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse(node1, rect1, node2.less, less)
traverse(node1, rect1, node2.greater, greater)
elif isinstance(node2, KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse(node1.less, less, node2, rect2)
traverse(node1.greater, greater, node2, rect2)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse(node1.less, less1, node2.less, less2)
traverse(node1.less, less1, node2.greater, greater2)
traverse(node1.greater, greater1, node2.less, less2)
traverse(node1.greater, greater1, node2.greater, greater2)
traverse(self.tree, Rectangle(self.maxes, self.mins),
other.tree, Rectangle(other.maxes, other.mins))
return result
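# Illustrative sketch (not part of the original module) of the pairwise query
# helpers defined above, using a tiny one-dimensional point set.
def _example_pair_queries():
    """Illustrative check of query_pairs and count_neighbors; not used by the library."""
    points = [[0.0], [1.0], [5.0]]
    tree = KDTree(points, leafsize=2)
    close_pairs = tree.query_pairs(r=1.5)            # {(0, 1)}: only 0.0 and 1.0 are within 1.5
    pair_count = tree.count_neighbors(tree, r=1.5)   # 5: the three self-pairs plus (0, 1) counted both ways
    return close_pairs, pair_count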
class HaversineDistanceKDTree(KDTree):
"""Modification of the scipy.spatial.KDTree class to allow querying for
nearest neighbours measured by distance along the Earth's surface.
"""
def __init__(self, data, leafsize=10, mask=None):
self.data = np.ma.asarray(data)
if mask is not None:
self.data.mask = np.column_stack([mask] * data.shape[1])
self.n, self.m = np.shape(self.data)
self.leafsize = int(leafsize)
if self.leafsize < 1:
raise ValueError("leafsize must be at least 1")
self.maxes = np.amax(self.data, axis=0)
self.mins = np.amin(self.data, axis=0)
indices = np.arange(self.n)
if mask is not None:
indices = np.ma.array(indices, mask=mask)
indices = indices.compressed()
self.tree = self._build(indices, self.maxes, self.mins)
def _query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):
metric = np.array([1.0] * x.size)
metric[1] = math.cos(x[1] * math.pi / 180.0)
side_distances = np.maximum(0, np.maximum(x - self.maxes, self.mins - x)) * metric
if p != np.inf:
side_distances **= p
min_distance = np.sum(side_distances)
else:
min_distance = np.amax(side_distances)
# priority queue for chasing nodes
# entries are:
# minimum distance between the cell and the target
# distances between the nearest side of the cell and the target
# the head node of the cell
q = [(min_distance,
tuple(side_distances),
self.tree)]
# priority queue for the nearest neighbors
# furthest known neighbor first
# entries are (-distance**p, i)
neighbors = []
if eps == 0:
epsfac = 1
elif p == np.inf:
epsfac = 1 / (1 + eps)
else:
epsfac = 1 / (1 + eps) ** p
if p != np.inf and distance_upper_bound != np.inf:
distance_upper_bound = distance_upper_bound ** p
while q:
min_distance, side_distances, node = heappop(q)
if isinstance(node, HaversineDistanceKDTree.leafnode):
# brute-force
data = self.data[node.idx]
ds = haversine_distance(data, x[np.newaxis, :])
for i in range(len(ds)):
if ds[i] < distance_upper_bound:
if len(neighbors) == k:
heappop(neighbors)
heappush(neighbors, (-ds[i], node.idx[i]))
if len(neighbors) == k:
distance_upper_bound = -neighbors[0][0]
else:
# we don't push cells that are too far onto the queue at all,
# but since the distance_upper_bound decreases, we might get
# here even if the cell's too far
if min_distance > distance_upper_bound * epsfac:
# since this is the nearest cell, we're done, bail out
break
# compute minimum distances to the children and push them on
if x[node.split_dim] < node.split:
near, far = node.less, node.greater
else:
near, far = node.greater, node.less
# near child is at the same distance as the current node
heappush(q, (min_distance, side_distances, near))
# far child is further by an amount depending only
# on the split value
sd = list(side_distances)
if p == np.inf:
min_distance = max(min_distance, abs(node.split - x[node.split_dim]))
elif p == 1:
                    sd[node.split_dim] = np.abs(node.split - x[node.split_dim])
# -*- coding: utf-8 -*-
from datetime import datetime
import json
import logging
import re
from urllib import urlencode
try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO
import newrelic.agent
from pyquery import PyQuery as pq
from tower import ugettext_lazy as _lazy, ugettext as _
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import user_passes_test
from django.core.exceptions import PermissionDenied
from django.db.models import Q
from django.http import (HttpResponse, HttpResponseRedirect,
HttpResponsePermanentRedirect,
Http404, HttpResponseBadRequest)
from django.http.multipartparser import MultiPartParser
from django.shortcuts import (get_object_or_404, get_list_or_404,
redirect, render)
from django.utils.safestring import mark_safe
from django.views.decorators.http import (require_GET, require_POST,
require_http_methods, condition)
from django.views.decorators.clickjacking import (xframe_options_exempt,
xframe_options_sameorigin)
from django.views.decorators.csrf import csrf_exempt
from constance import config
from jingo.helpers import urlparams
from ratelimit.decorators import ratelimit
from smuggler.forms import ImportForm
from teamwork.shortcuts import get_object_or_404_or_403
import waffle
from kuma.authkeys.decorators import accepts_auth_key
from kuma.contentflagging.models import ContentFlag, FLAG_NOTIFICATIONS
from kuma.attachments.forms import AttachmentRevisionForm
from kuma.attachments.models import Attachment
from kuma.attachments.utils import attachments_json, full_attachment_url
from kuma.core.cache import memcache
from kuma.core.decorators import (never_cache, login_required,
permission_required, superuser_required)
from kuma.core.urlresolvers import reverse
from kuma.core.utils import (get_object_or_none, paginate, smart_int,
get_ip, limit_banned_ip_to_0)
from kuma.search.store import referrer_url
from kuma.users.models import UserProfile
import kuma.wiki.content
from . import kumascript
from .constants import (DOCUMENTS_PER_PAGE, TEMPLATE_TITLE_PREFIX,
SLUG_CLEANSING_REGEX, REVIEW_FLAG_TAGS_DEFAULT,
DOCUMENT_LAST_MODIFIED_CACHE_KEY_TMPL,
REDIRECT_CONTENT, ALLOWED_TAGS)
from .decorators import (check_readonly, process_document_path,
allow_CORS_GET, prevent_indexing)
from .events import EditDocumentEvent
from .forms import (DocumentForm, RevisionForm, DocumentContentFlagForm,
RevisionValidationForm, TreeMoveForm,
DocumentDeletionForm)
from .helpers import format_comment
from .models import (Document, Revision, HelpfulVote, EditorToolbar,
DocumentZone, DocumentTag, ReviewTag, LocalizationTag,
DocumentDeletionLog,
DocumentRenderedContentNotAvailable,
RevisionIP)
from .queries import MultiQuerySet
from .tasks import move_page, send_first_edit_email
from .utils import locale_and_slug_from_path
log = logging.getLogger('kuma.wiki.views')
@newrelic.agent.function_trace()
def _document_last_modified(request, document_slug, document_locale):
"""
Utility function to derive the last modified timestamp of a document.
Mainly for the @condition decorator.
"""
# build an adhoc natural cache key to not have to do DB query
adhoc_natural_key = (document_locale, document_slug)
natural_key_hash = Document.natural_key_hash(adhoc_natural_key)
cache_key = DOCUMENT_LAST_MODIFIED_CACHE_KEY_TMPL % natural_key_hash
try:
last_mod = memcache.get(cache_key)
if last_mod is None:
doc = Document.objects.get(locale=document_locale,
slug=document_slug)
last_mod = doc.fill_last_modified_cache()
# Convert the cached Unix epoch seconds back to Python datetime
return datetime.fromtimestamp(float(last_mod))
except Document.DoesNotExist:
return None
def _split_slug(slug):
"""Utility function to do basic slug splitting"""
slug_split = slug.split('/')
length = len(slug_split)
root = None
seo_root = ''
bad_seo_roots = ['Web']
if length > 1:
root = slug_split[0]
if root in bad_seo_roots:
if length > 2:
seo_root = root + '/' + slug_split[1]
else:
seo_root = root
specific = slug_split.pop()
parent = '/'.join(slug_split)
return {'specific': specific, 'parent': parent,
'full': slug, 'parent_split': slug_split, 'length': length,
'root': root, 'seo_root': seo_root}
def _join_slug(parent_split, slug):
parent_split.append(slug)
return '/'.join(parent_split)
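# A small illustrative sketch (not part of the original views module) of what the
# slug helpers above produce for a nested document path; the slug used here is
# just an example value.
def _example_slug_helpers():
    """Illustrative check of _split_slug and _join_slug; not used by the views."""
    parts = _split_slug('Web/CSS/color')
    # parts['specific'] == 'color', parts['parent'] == 'Web/CSS',
    # parts['root'] == 'Web', parts['seo_root'] == 'Web/CSS', parts['length'] == 3
    rejoined = _join_slug(parts['parent_split'], parts['specific'])
    # rejoined == 'Web/CSS/color'; note that _join_slug appends to parent_split in place.
    return parts, rejoined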
#####################################################################
#
# Utility functions which support the document() view and its various
# sub-views.
#
#####################################################################
def _get_doc_and_fallback_reason(document_locale, document_slug):
"""
Attempt to fetch a Document at the given locale and slug, and
return it, or return a fallback reason if we weren't able to.
"""
doc = None
fallback_reason = None
try:
doc = Document.objects.get(locale=document_locale, slug=document_slug)
if (not doc.current_revision and doc.parent and
doc.parent.current_revision):
# This is a translation but its current_revision is None
# and OK to fall back to parent (parent is approved).
fallback_reason = 'translation_not_approved'
elif not doc.current_revision:
fallback_reason = 'no_content'
except Document.DoesNotExist:
pass
return doc, fallback_reason
def _check_for_deleted_document(document_locale, document_slug):
"""
If a Document is not found, see if there's a deletion log for it.
"""
return DocumentDeletionLog.objects.filter(
locale=document_locale,
slug=document_slug
)
def _default_locale_fallback(request, document_slug, document_locale):
"""
If we're falling back to a Document in the default locale, figure
out why and whether we can redirect to a translation in the
requested locale.
"""
fallback_doc = None
redirect_url = None
fallback_reason = None
try:
fallback_doc = Document.objects.get(locale=settings.WIKI_DEFAULT_LANGUAGE,
slug=document_slug)
# If there's a translation to the requested locale, take it:
translation = fallback_doc.translated_to(document_locale)
if translation and translation.current_revision:
url = translation.get_absolute_url()
redirect_url = urlparams(url, query_dict=request.GET)
elif translation and fallback_doc.current_revision:
# Found a translation but its current_revision is None
# and OK to fall back to parent (parent is approved).
fallback_reason = 'translation_not_approved'
elif fallback_doc.current_revision:
# There is no translation
# and OK to fall back to parent (parent is approved).
fallback_reason = 'no_translation'
except Document.DoesNotExist:
pass
return fallback_doc, fallback_reason, redirect_url
def _document_redirect_to_create(document_slug, document_locale, slug_dict):
"""
When a Document doesn't exist but the user can create it, return
the creation URL to redirect to.
"""
url = reverse('wiki.new_document', locale=document_locale)
if slug_dict['length'] > 1:
parent_doc = get_object_or_404(Document,
locale=document_locale,
slug=slug_dict['parent'],
is_template=0)
url = urlparams(url, parent=parent_doc.id,
slug=slug_dict['specific'])
else:
# This is a "base level" redirect, i.e. no parent
url = urlparams(url, slug=document_slug)
return url
def _check_404_params(request):
"""
If a Document is not found, we may 404 immediately based on
request parameters.
"""
params = []
for request_param in ('raw', 'include', 'nocreate'):
params.append(request.GET.get(request_param, None))
return any(params) or (not request.user.is_authenticated())
def _set_common_headers(doc, section_id, response):
"""
Perform some response-header manipulation that gets used in
several places.
"""
response['ETag'] = doc.calculate_etag(section_id)
if doc.current_revision_id:
response['X-kuma-revision'] = doc.current_revision_id
return response
def _get_html_and_errors(request, doc, rendering_params):
"""
Get the initial HTML for a Document, including determining whether
to use kumascript to render it.
"""
doc_html, ks_errors = doc.html, None
render_raw_fallback = False
base_url = request.build_absolute_uri('/')
if rendering_params['use_rendered']:
if (request.GET.get('bleach_new', False) is not False and
request.user.is_authenticated()):
# Temporary bleach_new query option to switch to Constance-based
# Bleach whitelists, uses KumaScript POST for temporary rendering
doc_html, ks_errors = kumascript.post(request, doc_html,
request.locale, True)
else:
# A logged-in user can schedule a full re-render with Shift-Reload
cache_control = None
if request.user.is_authenticated():
# Shift-Reload sends Cache-Control: no-cache
ua_cc = request.META.get('HTTP_CACHE_CONTROL')
if ua_cc == 'no-cache':
cache_control = 'no-cache'
try:
r_body, r_errors = doc.get_rendered(cache_control, base_url)
if r_body:
doc_html = r_body
if r_errors:
ks_errors = r_errors
except DocumentRenderedContentNotAvailable:
# There was no rendered content available, and we were unable
# to render it on the spot. So, fall back to presenting raw
# content
render_raw_fallback = True
return doc_html, ks_errors, render_raw_fallback
def _generate_toc_html(doc, rendering_params):
"""
Generate the HTML, if needed, for a Document's table of contents.
"""
toc_html = None
if doc.show_toc and not rendering_params['raw']:
toc_html = doc.get_toc_html()
return toc_html
def _filter_doc_html(request, doc, doc_html, rendering_params):
"""
Apply needed filtering/annotating operations to a Document's HTML.
"""
# If ?summary is on, just serve up the summary as doc HTML
if rendering_params['summary']:
return doc.get_summary_html()
# Shortcut the parsing & filtering, if none of these relevant rendering
# params are set.
if not (rendering_params['section'] or rendering_params['raw'] or
rendering_params['edit_links'] or rendering_params['include']):
return doc_html
# TODO: One more view-time content parsing instance to refactor
tool = kuma.wiki.content.parse(doc_html)
# ?raw view is often used for editors - apply safety filtering.
# TODO: Should this stuff happen in render() itself?
if rendering_params['raw']:
# HACK: Raw rendered content has not had section IDs injected
tool.injectSectionIDs()
tool.filterEditorSafety()
# If a section ID is specified, extract that section.
# TODO: Pre-extract every section on render? Might be over-optimization
if rendering_params['section']:
tool.extractSection(rendering_params['section'])
# If this user can edit the document, inject section editing links.
# TODO: Rework so that this happens on the client side?
if ((rendering_params['edit_links'] or not rendering_params['raw']) and
request.user.is_authenticated() and
doc.allows_revision_by(request.user)):
tool.injectSectionEditingLinks(doc.slug, doc.locale)
doc_html = tool.serialize()
# If this is an include, filter out the class="noinclude" blocks.
# TODO: Any way to make this work in rendering? Possibly over-optimization,
# because this is often paired with ?section - so we'd need to store every
# section twice for with & without include sections
if rendering_params['include']:
doc_html = kuma.wiki.content.filter_out_noinclude(doc_html)
return doc_html
def _get_seo_parent_title(slug_dict, document_locale):
"""
Get parent-title information for SEO purposes.
"""
if slug_dict['seo_root']:
seo_root_doc = get_object_or_404(Document,
locale=document_locale,
slug=slug_dict['seo_root'])
return u' - %s' % seo_root_doc.title
else:
return ''
#####################################################################
#
# Specialized sub-views which may be called by document().
#
#####################################################################
@newrelic.agent.function_trace()
@allow_CORS_GET
@prevent_indexing
def _document_deleted(request, deletion_logs):
"""
When a Document has been deleted, display a notice.
"""
deletion_log = deletion_logs.order_by('-pk')[0]
context = {'deletion_log': deletion_log}
return render(request, 'wiki/deletion_log.html', context, status=404)
@newrelic.agent.function_trace()
@allow_CORS_GET
def _document_raw(request, doc, doc_html, rendering_params):
"""
Display a raw Document.
"""
response = HttpResponse(doc_html)
response['X-Frame-Options'] = 'Allow'
response['X-Robots-Tag'] = 'noindex'
absolute_url = doc.get_absolute_url()
if absolute_url in (config.KUMA_CUSTOM_CSS_PATH,
config.KUMA_CUSTOM_SAMPLE_CSS_PATH):
response['Content-Type'] = 'text/css; charset=utf-8'
elif doc.is_template:
# Treat raw, un-bleached template source as plain text, not HTML.
response['Content-Type'] = 'text/plain; charset=utf-8'
return _set_common_headers(doc, rendering_params['section'], response)
@csrf_exempt
@require_http_methods(['GET', 'PUT', 'HEAD'])
@allow_CORS_GET
@accepts_auth_key
@process_document_path
@condition(last_modified_func=_document_last_modified)
@newrelic.agent.function_trace()
def document(request, document_slug, document_locale):
"""
View a wiki document.
"""
# PUT requests go to the write API.
if request.method == 'PUT':
if (not request.authkey and not request.user.is_authenticated()):
raise PermissionDenied
return _document_PUT(request,
document_slug,
document_locale)
fallback_reason = None
slug_dict = _split_slug(document_slug)
# Is there a document at this slug, in this locale?
doc, fallback_reason = _get_doc_and_fallback_reason(document_locale,
document_slug)
if doc is None:
        # It's possible the document once existed, but is now deleted.
        # If so, show that it was deleted.
deletion_logs = _check_for_deleted_document(document_locale,
document_slug)
if deletion_logs.exists():
return _document_deleted(request, deletion_logs)
# We can throw a 404 immediately if the request type is HEAD.
# TODO: take a shortcut if the document was found?
if request.method == 'HEAD':
raise Http404
# Check if we should fall back to default locale.
fallback_doc, fallback_reason, redirect_url = _default_locale_fallback(
request, document_slug, document_locale)
if fallback_doc is not None:
doc = fallback_doc
if redirect_url is not None:
return redirect(redirect_url)
else:
if _check_404_params(request):
raise Http404
# The user may be trying to create a child page; if a parent exists
        # for this document, redirect them to the "Create" page.
        # Otherwise, they could be trying to create a main level doc.
create_url = _document_redirect_to_create(document_slug,
document_locale,
slug_dict)
return redirect(create_url)
# We found a Document. Now we need to figure out how we're going
# to display it.
# Step 1: If we're a redirect, and redirecting hasn't been
# disabled, redirect.
# Obey explicit redirect pages:
# Don't redirect on redirect=no (like Wikipedia), so we can link from a
# redirected-to-page back to a "Redirected from..." link, so you can edit
# the redirect.
redirect_url = (None if request.GET.get('redirect') == 'no'
else doc.redirect_url())
if redirect_url and redirect_url != doc.get_absolute_url():
url = urlparams(redirect_url, query_dict=request.GET)
# TODO: Re-enable the link in this message after Django >1.5 upgrade
# Redirected from <a href="%(url)s?redirect=no">%(url)s</a>
messages.add_message(
request, messages.WARNING,
mark_safe(_(u'Redirected from %(url)s') % {
"url": request.build_absolute_uri(doc.get_absolute_url())
}), extra_tags='wiki_redirect')
return HttpResponsePermanentRedirect(url)
# Step 2: Kick 'em out if they're not allowed to view this Document.
if not request.user.has_perm('wiki.view_document', doc):
raise PermissionDenied
# Step 3: Read some request params to see what we're supposed to
# do.
rendering_params = {}
for param in ('raw', 'summary', 'include', 'edit_links'):
rendering_params[param] = request.GET.get(param, False) is not False
rendering_params['section'] = request.GET.get('section', None)
rendering_params['render_raw_fallback'] = False
rendering_params['use_rendered'] = kumascript.should_use_rendered(doc, request.GET)
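    # For example (illustrative), a request for "?raw&section=Syntax" ends up
    # with rendering_params like {'raw': True, 'summary': False,
    # 'include': False, 'edit_links': False, 'section': 'Syntax', ...}.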
# Step 4: Get us some HTML to play with.
doc_html, ks_errors, render_raw_fallback = _get_html_and_errors(
request, doc, rendering_params)
rendering_params['render_raw_fallback'] = render_raw_fallback
toc_html = None
# Step 5: Start parsing and applying filters.
if not doc.is_template:
toc_html = _generate_toc_html(doc, rendering_params)
doc_html = _filter_doc_html(request, doc, doc_html, rendering_params)
# Step 6: If we're doing raw view, bail out to that now.
if rendering_params['raw']:
return _document_raw(request, doc, doc_html, rendering_params)
    # Step 7: Gather the remaining pieces: contributors, SEO info,
    # attachments, and pre-parsed content hunks.
    # Get the contributors. (To avoid this query, we could render the
    # contributors right into the Document's html field.)
# NOTE: .only() avoids a memcache object-too-large error for large wiki
# pages when an attempt is made to cache all revisions
contributors = set([r.creator for r in doc.revisions
.filter(is_approved=True)
.only('creator')
.select_related('creator')])
# TODO: Port this kitsune feature over, eventually:
# https://github.com/jsocol/kitsune/commit/
# f1ebb241e4b1d746f97686e65f49e478e28d89f2
# Get the SEO summary
seo_summary = ''
if not doc.is_template:
seo_summary = doc.get_summary_text()
# Get the additional title information, if necessary.
seo_parent_title = _get_seo_parent_title(slug_dict, document_locale)
# Retrieve file attachments
attachments = attachments_json(doc.attachments)
# Retrieve pre-parsed content hunks
if doc.is_template:
quick_links_html, zone_subnav_html = None, None
body_html = doc_html
else:
quick_links_html = doc.get_quick_links_html()
zone_subnav_html = doc.get_zone_subnav_html()
body_html = doc.get_body_html()
share_text = _('I learned about %(title)s on MDN.') % {"title": doc.title, }
# Step 8: Bundle it all up and, finally, return.
context = {
'document': doc,
'document_html': doc_html,
'toc_html': toc_html,
'quick_links_html': quick_links_html,
'zone_subnav_html': zone_subnav_html,
'body_html': body_html,
'contributors': contributors,
'fallback_reason': fallback_reason,
'kumascript_errors': ks_errors,
'render_raw_fallback': rendering_params['render_raw_fallback'],
'seo_summary': seo_summary,
'seo_parent_title': seo_parent_title,
'share_text': share_text,
'attachment_data': attachments,
'attachment_data_json': json.dumps(attachments),
'search_url': referrer_url(request) or '',
}
response = render(request, 'wiki/document.html', context)
return _set_common_headers(doc, rendering_params['section'], response)
def _document_PUT(request, document_slug, document_locale):
"""Handle PUT requests as document write API"""
# Try parsing one of the supported content types from the request
try:
content_type = request.META.get('CONTENT_TYPE', '')
if content_type.startswith('application/json'):
data = json.loads(request.body)
elif content_type.startswith('multipart/form-data'):
parser = MultiPartParser(request.META,
StringIO(request.body),
request.upload_handlers,
request.encoding)
data, files = parser.parse()
elif content_type.startswith('text/html'):
# TODO: Refactor this into wiki.content ?
# First pass: Just assume the request body is an HTML fragment.
html = request.body
data = dict(content=html)
# Second pass: Try parsing the body as a fuller HTML document,
# and scrape out some of the interesting parts.
try:
doc = pq(html)
head_title = doc.find('head title')
if head_title.length > 0:
data['title'] = head_title.text()
body_content = doc.find('body')
if body_content.length > 0:
data['content'] = body_content.html()
            except Exception:
pass
else:
resp = HttpResponse()
resp.status_code = 400
resp.content = _("Unsupported content-type: %s") % content_type
return resp
except Exception, e:
resp = HttpResponse()
resp.status_code = 400
resp.content = _("Request parsing error: %s") % e
return resp
try:
# Look for existing document to edit:
doc = Document.objects.get(locale=document_locale,
slug=document_slug)
if not doc.allows_revision_by(request.user):
raise PermissionDenied
section_id = request.GET.get('section', None)
is_new = False
# Use ETags to detect mid-air edit collision
# see: http://www.w3.org/1999/04/Editing/
expected_etag = request.META.get('HTTP_IF_MATCH', False)
if expected_etag:
curr_etag = doc.calculate_etag(section_id)
if curr_etag != expected_etag:
resp = HttpResponse()
resp.status_code = 412
resp.content = _('ETag precondition failed')
return resp
except Document.DoesNotExist:
# No existing document, so this is an attempt to create a new one...
if not Document.objects.allows_add_by(request.user, document_slug):
raise PermissionDenied
# TODO: There should be a model utility for creating a doc...
# Let's see if this slug path implies a parent...
slug_parts = _split_slug(document_slug)
if not slug_parts['parent']:
# Apparently, this is a root page!
parent_doc = None
else:
# There's a parent implied, so make sure we can find it.
parent_doc = get_object_or_404(Document, locale=document_locale,
slug=slug_parts['parent'])
# Create and save the new document; we'll revise it immediately.
doc = Document(slug=document_slug, locale=document_locale,
title=data.get('title', document_slug),
parent_topic=parent_doc,
category=Document.CATEGORIES[0][0])
doc.save()
section_id = None # No section editing for new document!
is_new = True
new_rev = doc.revise(request.user, data, section_id)
doc.schedule_rendering('max-age=0')
request.authkey.log(is_new and 'created' or 'updated',
new_rev, data.get('summary', None))
resp = HttpResponse()
if not is_new:
resp.content = 'RESET'
resp.status_code = 205
else:
resp.content = 'CREATED'
new_loc = request.build_absolute_uri(doc.get_absolute_url())
resp['Location'] = new_loc
resp.status_code = 201
return resp
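# Rough client-side sketch of the write API handled above. Everything here is
# illustrative: the host and slug are placeholders, and authentication (an auth
# key or a logged-in session, per the checks in document()) is left out.
#
#     import requests
#     base = 'https://wiki.example.com/en-US/docs/Sandbox'
#     etag = requests.get(base + '?raw=true').headers['ETag']
#     resp = requests.put(base,
#                         headers={'Content-Type': 'text/html',
#                                  'If-Match': etag},  # mid-air collision guard
#                         data='<html><head><title>Sandbox</title></head>'
#                              '<body><p>Updated content.</p></body></html>')
#
# Expected status codes, per the code above: 201 (created, with a Location
# header), 205 (existing document revised), 412 (ETag mismatch, i.e. someone
# else saved first), or 400 for unsupported or unparseable request bodies.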
@prevent_indexing
@process_document_path
@newrelic.agent.function_trace()
def revision(request, document_slug, document_locale, revision_id):
"""View a wiki document revision."""
rev = get_object_or_404(Revision, pk=revision_id,
document__slug=document_slug)
data = {'document': rev.document,
'revision': rev,
'comment': format_comment(rev)}
return render(request, 'wiki/revision.html', data)
@require_GET
def list_documents(request, category=None, tag=None):
"""List wiki documents."""
category_id = None
if category:
try:
category_id = int(category)
category = unicode(dict(Document.CATEGORIES)[category_id])
except (KeyError, ValueError):
raise Http404
# Taggit offers a slug - but use name here, because the slugification
# stinks and is hard to customize.
tag_obj = None
if tag:
matching_tags = get_list_or_404(DocumentTag, name__iexact=tag)
for matching_tag in matching_tags:
if matching_tag.name.lower() == tag.lower():
tag_obj = matching_tag
break
docs = Document.objects.filter_for_list(locale=request.locale,
category=category_id,
tag=tag_obj)
paginated_docs = paginate(request, docs, per_page=DOCUMENTS_PER_PAGE)
context = {
'documents': paginated_docs,
'count': docs.count(),
'category': category,
'tag': tag,
}
return render(request, 'wiki/list_documents.html', context)
@require_GET
def list_templates(request):
"""Returns listing of all templates"""
docs = Document.objects.filter(is_template=True).order_by('title')
paginated_docs = paginate(request, docs, per_page=DOCUMENTS_PER_PAGE)
context = {
'documents': paginated_docs,
'count': docs.count(),
'is_templates': True,
}
return render(request, 'wiki/list_documents.html', context)
@require_GET
def list_tags(request):
"""Returns listing of all tags"""
tags = DocumentTag.objects.order_by('name')
tags = paginate(request, tags, per_page=DOCUMENTS_PER_PAGE)
return render(request, 'wiki/list_tags.html', {'tags': tags})
@require_GET
def list_documents_for_review(request, tag=None):
"""Lists wiki documents with revisions flagged for review"""
tag_obj = tag and get_object_or_404(ReviewTag, name=tag) or None
docs = Document.objects.filter_for_review(locale=request.locale, tag=tag_obj)
paginated_docs = paginate(request, docs, per_page=DOCUMENTS_PER_PAGE)
context = {
'documents': paginated_docs,
'count': docs.count(),
'tag': tag_obj,
'tag_name': tag,
}
return render(request, 'wiki/list_documents_for_review.html', context)
@require_GET
def list_documents_with_localization_tag(request, tag=None):
"""Lists wiki documents with localization tag"""
tag_obj = tag and get_object_or_404(LocalizationTag, name=tag) or None
docs = Document.objects.filter_with_localization_tag(locale=request.locale,
tag=tag_obj)
paginated_docs = paginate(request, docs, per_page=DOCUMENTS_PER_PAGE)
context = {
'documents': paginated_docs,
'count': docs.count(),
'tag': tag_obj,
'tag_name': tag,
}
return render(request, 'wiki/list_documents_with_localization_tags.html',
context)
@require_GET
def list_documents_with_errors(request):
"""Lists wiki documents with (KumaScript) errors"""
docs = Document.objects.filter_for_list(locale=request.locale, errors=True)
paginated_docs = paginate(request, docs, per_page=DOCUMENTS_PER_PAGE)
context = {
'documents': paginated_docs,
'count': docs.count(),
'errors': True,
}
return render(request, 'wiki/list_documents.html', context)
@require_GET
def list_documents_without_parent(request):
"""Lists wiki documents without parent (no English source document)"""
docs = Document.objects.filter_for_list(locale=request.locale,
noparent=True)
paginated_docs = paginate(request, docs, per_page=DOCUMENTS_PER_PAGE)
context = {
'documents': paginated_docs,
'count': docs.count(),
'noparent': True,
}
return render(request, 'wiki/list_documents.html', context)
@require_GET
def list_top_level_documents(request):
"""Lists documents directly under /docs/"""
docs = Document.objects.filter_for_list(locale=request.locale,
toplevel=True)
paginated_docs = paginate(request, docs, per_page=DOCUMENTS_PER_PAGE)
context = {
'documents': paginated_docs,
'count': docs.count(),
'toplevel': True,
}
return render(request, 'wiki/list_documents.html', context)
@login_required
@check_readonly
@prevent_indexing
@never_cache
@newrelic.agent.function_trace()
def new_document(request):
"""Create a new wiki document."""
initial_slug = request.GET.get('slug', '')
initial_title = initial_slug.replace('_', ' ')
initial_parent_id = ''
try:
initial_parent_id = int(request.GET.get('parent', ''))
except ValueError:
pass
clone_id = None
try:
clone_id = int(request.GET.get('clone', ''))
except ValueError:
pass
if not Document.objects.allows_add_by(request.user, initial_slug):
# Try to head off disallowed Template:* creation, right off the bat
raise PermissionDenied
is_template = initial_slug.startswith(TEMPLATE_TITLE_PREFIX)
# If a parent ID is provided via GET, confirm it exists
parent_slug = parent_path = ''
if initial_parent_id:
try:
parent_doc = Document.objects.get(pk=initial_parent_id)
parent_slug = parent_doc.slug
parent_path = parent_doc.get_absolute_url()
except Document.DoesNotExist:
pass
if request.method == 'GET':
initial_data = {}
initial_html = ''
initial_tags = ''
initial_toc = Revision.TOC_DEPTH_H4
if clone_id:
try:
clone_doc = Document.objects.get(pk=clone_id)
initial_title = clone_doc.title
initial_html = clone_doc.html
initial_tags = clone_doc.tags.all()
if clone_doc.current_revision:
initial_toc = clone_doc.current_revision.toc_depth
else:
initial_toc = 1
except Document.DoesNotExist:
pass
if parent_slug:
initial_data['parent_topic'] = initial_parent_id
if initial_slug:
initial_data['title'] = initial_title
initial_data['slug'] = initial_slug
if is_template:
review_tags = ('template',)
else:
review_tags = REVIEW_FLAG_TAGS_DEFAULT
doc_form = DocumentForm(initial=initial_data)
rev_form = RevisionForm(initial={
'slug': initial_slug,
'title': initial_title,
'content': initial_html,
'review_tags': review_tags,
'tags': initial_tags,
'toc_depth': initial_toc
})
allow_add_attachment = (
Attachment.objects.allow_add_attachment_by(request.user))
context = {
'is_template': is_template,
'parent_slug': parent_slug,
'parent_id': initial_parent_id,
'document_form': doc_form,
'revision_form': rev_form,
'WIKI_DOCUMENT_TAG_SUGGESTIONS': config.WIKI_DOCUMENT_TAG_SUGGESTIONS,
'initial_tags': initial_tags,
'allow_add_attachment': allow_add_attachment,
'attachment_form': AttachmentRevisionForm(),
'parent_path': parent_path}
return render(request, 'wiki/new_document.html', context)
post_data = request.POST.copy()
posted_slug = post_data['slug']
post_data.update({'locale': request.locale})
if parent_slug:
post_data.update({'parent_topic': initial_parent_id})
post_data.update({'slug': parent_slug + '/' + post_data['slug']})
doc_form = DocumentForm(post_data)
rev_form = RevisionValidationForm(request.POST.copy())
rev_form.parent_slug = parent_slug
if doc_form.is_valid() and rev_form.is_valid():
rev_form = RevisionForm(post_data)
if rev_form.is_valid():
slug = doc_form.cleaned_data['slug']
if not Document.objects.allows_add_by(request.user, slug):
raise PermissionDenied
doc = doc_form.save(None)
_save_rev_and_notify(rev_form, request, doc)
if doc.current_revision.is_approved:
view = 'wiki.document'
else:
view = 'wiki.document_revisions'
return HttpResponseRedirect(reverse(view, args=[doc.slug]))
else:
doc_form.data['slug'] = posted_slug
else:
doc_form.data['slug'] = posted_slug
allow_add_attachment = (
Attachment.objects.allow_add_attachment_by(request.user))
context = {
'is_template': is_template,
'document_form': doc_form,
'revision_form': rev_form,
'WIKI_DOCUMENT_TAG_SUGGESTIONS': config.WIKI_DOCUMENT_TAG_SUGGESTIONS,
'allow_add_attachment': allow_add_attachment,
'attachment_form': AttachmentRevisionForm(),
'parent_slug': parent_slug,
'parent_path': parent_path}
return render(request, 'wiki/new_document.html', context)
@require_http_methods(['GET', 'POST'])
@login_required # TODO: Stop repeating this knowledge here and in Document.allows_editing_by.
@ratelimit(key='user', rate=limit_banned_ip_to_0, block=True)
@process_document_path
@check_readonly
@prevent_indexing
@never_cache
@newrelic.agent.function_trace()
def edit_document(request, document_slug, document_locale, revision_id=None):
"""Create a new revision of a wiki document, or edit document metadata."""
doc = get_object_or_404_or_403('wiki.add_revision',
request.user,
Document,
locale=document_locale,
slug=document_slug)
user = request.user
# If this document has a parent, then the edit is handled by the
# translate view. Pass it on.
if doc.parent and doc.parent.id != doc.id:
return translate(request, doc.parent.slug, doc.locale, revision_id,
bypass_process_document_path=True)
if revision_id:
rev = get_object_or_404(Revision, pk=revision_id, document=doc)
else:
rev = doc.current_revision or doc.revisions.order_by('-created',
'-id')[0]
# Keep hold of the full post slug
slug_dict = _split_slug(document_slug)
# Update the slug, removing the parent path, and
# *only* using the last piece.
# This is only for the edit form.
rev.slug = slug_dict['specific']
section_id = request.GET.get('section', None)
if section_id and not request.is_ajax():
return HttpResponse(_("Sections may only be edited inline."))
disclose_description = bool(request.GET.get('opendescription'))
doc_form = rev_form = None
if doc.allows_revision_by(user):
rev_form = RevisionForm(instance=rev,
initial={'based_on': rev.id,
'current_rev': rev.id,
'comment': ''},
section_id=section_id)
if doc.allows_editing_by(user):
doc_form = DocumentForm(initial=_document_form_initial(doc))
    # Need to check *here* whether this doc could have a translation parent
show_translation_parent_block = (
(document_locale != settings.WIKI_DEFAULT_LANGUAGE) and
(not doc.parent_id))
if request.method == 'GET':
if not (rev_form or doc_form):
# You can't do anything on this page, so get lost.
raise PermissionDenied
else: # POST
is_iframe_target = request.GET.get('iframe', False)
is_raw = request.GET.get('raw', False)
need_edit_links = request.GET.get('edit_links', False)
parent_id = request.POST.get('parent_id', '')
# Attempt to set a parent
if show_translation_parent_block and parent_id:
try:
parent_doc = get_object_or_404(Document, id=parent_id)
doc.parent = parent_doc
except Document.DoesNotExist:
pass
# Comparing against localized names for the Save button bothers me, so
# I embedded a hidden input:
which_form = request.POST.get('form')
if which_form == 'doc':
if doc.allows_editing_by(user):
post_data = request.POST.copy()
post_data.update({'locale': document_locale})
doc_form = DocumentForm(post_data, instance=doc)
if doc_form.is_valid():
                    # This check must stay here: section edits don't POST a slug.
if 'slug' in post_data:
post_data['slug'] = _join_slug(
slug_dict['parent_split'], post_data['slug'])
# Get the possibly new slug for the imminent redirection:
doc = doc_form.save(None)
if is_iframe_target:
# TODO: Does this really need to be a template? Just
# shoehorning data into a single HTML element.
response = HttpResponse("""
<span id="iframe-response"
data-status="OK"
data-current-revision="%s">OK</span>
""" % doc.current_revision.id)
response['X-Frame-Options'] = 'SAMEORIGIN'
return response
return HttpResponseRedirect(
urlparams(reverse('wiki.edit_document',
args=[doc.slug]),
opendescription=1))
disclose_description = True
else:
raise PermissionDenied
elif which_form == 'rev':
if not doc.allows_revision_by(user):
raise PermissionDenied
else:
post_data = request.POST.copy()
rev_form = RevisionForm(post_data,
is_iframe_target=is_iframe_target,
section_id=section_id)
rev_form.instance.document = doc # for rev_form.clean()
# Come up with the original revision to which these changes
# would be applied.
orig_rev_id = request.POST.get('current_rev', False)
if orig_rev_id is False:
orig_rev = None
else:
orig_rev = Revision.objects.get(pk=orig_rev_id)
# Get the document's actual current revision.
curr_rev = doc.current_revision
if not rev_form.is_valid():
# Was there a mid-air collision?
if 'current_rev' in rev_form._errors:
# Jump out to a function to escape indentation hell
return _edit_document_collision(
request, orig_rev, curr_rev, is_iframe_target,
is_raw, rev_form, doc_form, section_id,
rev, doc)
if rev_form.is_valid():
_save_rev_and_notify(rev_form, request, doc)
if is_iframe_target:
# TODO: Does this really need to be a template? Just
# shoehorning data into a single HTML element.
response = HttpResponse("""
<span id="iframe-response"
data-status="OK"
data-current-revision="%s">OK</span>
""" % doc.current_revision.id)
response['X-Frame-Options'] = 'SAMEORIGIN'
return response
if (is_raw and orig_rev is not None and
curr_rev.id != orig_rev.id):
# If this is the raw view, and there was an original
# revision, but the original revision differed from the
# current revision at start of editing, we should tell
# the client to refresh the page.
response = HttpResponse('RESET')
response['X-Frame-Options'] = 'SAMEORIGIN'
response.status_code = 205
return response
if rev_form.instance.is_approved:
view = 'wiki.document'
else:
view = 'wiki.document_revisions'
# Construct the redirect URL, adding any needed parameters
url = reverse(view, args=[doc.slug], locale=doc.locale)
params = {}
if is_raw:
params['raw'] = 'true'
if need_edit_links:
# Only need to carry over ?edit_links with ?raw,
# because they're on by default in the normal UI
params['edit_links'] = 'true'
if section_id:
# If a section was edited, and we're using the raw
# content API, constrain to that section.
params['section'] = section_id
if params:
url = '%s?%s' % (url, urlencode(params))
if not is_raw and section_id:
# If a section was edited, jump to the section anchor
# if we're not getting raw content.
url = '%s#%s' % (url, section_id)
return HttpResponseRedirect(url)
parent_path = parent_slug = ''
if slug_dict['parent']:
parent_slug = slug_dict['parent']
if doc.parent_topic_id:
parent_doc = Document.objects.get(pk=doc.parent_topic_id)
parent_path = parent_doc.get_absolute_url()
parent_slug = parent_doc.slug
attachments = attachments_json(doc.attachments)
allow_add_attachment = (
Attachment.objects.allow_add_attachment_by(request.user))
context = {
'revision_form': rev_form,
'document_form': doc_form,
'section_id': section_id,
'disclose_description': disclose_description,
'parent_slug': parent_slug,
'parent_path': parent_path,
'revision': rev,
'document': doc,
'allow_add_attachment': allow_add_attachment,
'attachment_form': AttachmentRevisionForm(),
'attachment_data': attachments,
'WIKI_DOCUMENT_TAG_SUGGESTIONS': config.WIKI_DOCUMENT_TAG_SUGGESTIONS,
'attachment_data_json': json.dumps(attachments)
}
return render(request, 'wiki/edit_document.html', context)
@xframe_options_sameorigin
def _edit_document_collision(request, orig_rev, curr_rev, is_iframe_target,
is_raw, rev_form, doc_form, section_id, rev, doc):
"""Handle when a mid-air collision is detected upon submission"""
# Process the content as if it were about to be saved, so that the
    # html_diff is as close as possible.
content = (kuma.wiki.content.parse(request.POST['content'])
.injectSectionIDs()
.serialize())
# Process the original content for a diff, extracting a section if we're
# editing one.
if (doc.is_template):
curr_content = curr_rev.content
else:
tool = kuma.wiki.content.parse(curr_rev.content)
tool.injectSectionIDs()
if section_id:
tool.extractSection(section_id)
curr_content = tool.serialize()
if is_raw:
# When dealing with the raw content API, we need to signal the conflict
# differently so the client-side can escape out to a conflict
# resolution UI.
response = HttpResponse('CONFLICT')
response.status_code = 409
return response
# Make this response iframe-friendly so we can hack around the
# save-and-edit iframe button
context = {
'collision': True,
'revision_form': rev_form,
'document_form': doc_form,
'content': content,
'current_content': curr_content,
'section_id': section_id,
'original_revision': orig_rev,
'current_revision': curr_rev,
'revision': rev,
'document': doc,
}
return render(request, 'wiki/edit_document.html', context)
@require_http_methods(['GET', 'POST'])
@permission_required('wiki.move_tree')
@process_document_path
@check_readonly
@prevent_indexing
def move(request, document_slug, document_locale):
"""Move a tree of pages"""
doc = get_object_or_404(
Document, locale=document_locale, slug=document_slug)
descendants = doc.get_descendants()
slug_split = _split_slug(doc.slug)
if request.method == 'POST':
form = TreeMoveForm(initial=request.GET, data=request.POST)
if form.is_valid():
conflicts = doc._tree_conflicts(form.cleaned_data['slug'])
if conflicts:
return render(request, 'wiki/move_document.html', {
'form': form,
'document': doc,
'descendants': descendants,
'descendants_count': len(descendants),
'conflicts': conflicts,
'SLUG_CLEANSING_REGEX': SLUG_CLEANSING_REGEX,
})
move_page.delay(document_locale, document_slug,
form.cleaned_data['slug'],
request.user.email)
return render(request, 'wiki/move_requested.html', {
'form': form,
'document': doc
})
else:
form = TreeMoveForm()
return render(request, 'wiki/move_document.html', {
'form': form,
'document': doc,
'descendants': descendants,
'descendants_count': len(descendants),
'SLUG_CLEANSING_REGEX': SLUG_CLEANSING_REGEX,
'specific_slug': slug_split['specific']
})
@process_document_path
@check_readonly
@superuser_required
def repair_breadcrumbs(request, document_slug, document_locale):
doc = get_object_or_404(Document,
locale=document_locale,
slug=document_slug)
doc.repair_breadcrumbs()
return redirect(doc.get_absolute_url())
def ckeditor_config(request):
"""Return ckeditor config from database"""
default_config = EditorToolbar.objects.filter(name='default')
if default_config.exists():
code = default_config[0].code
else:
code = ''
context = {
'editor_config': code,
'redirect_pattern': REDIRECT_CONTENT,
'allowed_tags': ' '.join(ALLOWED_TAGS),
}
return render(request, 'wiki/ckeditor_config.js', context,
content_type="application/x-javascript")
@login_required
@require_POST
def preview_revision(request):
"""
Create an HTML fragment preview of the posted wiki syntax.
"""
wiki_content = request.POST.get('content', '')
kumascript_errors = []
doc_id = request.POST.get('doc_id')
if doc_id:
doc = Document.objects.get(id=doc_id)
else:
doc = None
if kumascript.should_use_rendered(doc, request.GET, html=wiki_content):
wiki_content, kumascript_errors = kumascript.post(request,
wiki_content,
request.locale)
# TODO: Get doc ID from JSON.
context = {
'content': wiki_content,
'title': request.POST.get('title', ''),
'kumascript_errors': kumascript_errors,
}
return render(request, 'wiki/preview.html', context)
def _make_doc_structure(document, level, expand, depth):
if document.is_redirect:
return None
if expand:
result = dict(document.get_json_data())
result['subpages'] = []
else:
result = {
'title': document.title,
'slug': document.slug,
'locale': document.locale,
'url': document.get_absolute_url(),
'subpages': []
}
if level < depth:
descendants = document.get_descendants(1)
descendants.sort(key=lambda item: item.title)
for descendant in descendants:
subpage = _make_doc_structure(descendant, level + 1, expand, depth)
if subpage is not None:
result['subpages'].append(subpage)
return result
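# Sketch of the structure returned above, with made-up values:
#
#     {
#         'title': 'CSS', 'slug': 'Web/CSS', 'locale': 'en-US',
#         'url': '/en-US/docs/Web/CSS',
#         'subpages': [
#             {'title': 'box-shadow', 'slug': 'Web/CSS/box-shadow',
#              'locale': 'en-US', 'url': '/en-US/docs/Web/CSS/box-shadow',
#              'subpages': []},
#         ],
#     }
#
# When `expand` is true, each node instead starts from document.get_json_data()
# and only the nested 'subpages' list is added on top.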
@require_GET
@allow_CORS_GET
@process_document_path
def get_children(request, document_slug, document_locale):
"""Retrieves a document and returns its children in a JSON structure"""
expand = 'expand' in request.GET
max_depth = 5
depth = int(request.GET.get('depth', max_depth))
if depth > max_depth:
depth = max_depth
result = []
try:
doc = Document.objects.get(locale=document_locale,
slug=document_slug)
result = _make_doc_structure(doc, 0, expand, depth)
except Document.DoesNotExist:
result = {'error': 'Document does not exist.'}
result = json.dumps(result)
return HttpResponse(result, content_type='application/json')
@require_GET
@allow_CORS_GET
@newrelic.agent.function_trace()
def autosuggest_documents(request):
"""Returns the closest title matches for front-end autosuggests"""
partial_title = request.GET.get('term', '')
locale = request.GET.get('locale', False)
current_locale = request.GET.get('current_locale', False)
exclude_current_locale = request.GET.get('exclude_current_locale', False)
if not partial_title:
# Only handle actual autosuggest requests, not requests for a
# memory-busting list of all documents.
return HttpResponseBadRequest(_lazy('Autosuggest requires a partial title. For a full document index, see the main page.'))
# Retrieve all documents that aren't redirects or templates
docs = (Document.objects.extra(select={'length': 'Length(slug)'})
.filter(title__icontains=partial_title,
is_template=0,
is_redirect=0)
.exclude(slug__icontains='Talk:') # Remove old talk pages
.order_by('title', 'length'))
# All locales are assumed, unless a specific locale is requested or banned
if locale:
docs = docs.filter(locale=locale)
if current_locale:
docs = docs.filter(locale=request.locale)
if exclude_current_locale:
docs = docs.exclude(locale=request.locale)
# Generates a list of acceptable docs
docs_list = []
for doc in docs:
data = doc.get_json_data()
data['label'] += ' [' + doc.locale + ']'
docs_list.append(data)
data = json.dumps(docs_list)
return HttpResponse(data, content_type='application/json')
@require_GET
@process_document_path
@prevent_indexing
def document_revisions(request, document_slug, document_locale):
"""List all the revisions of a given document."""
locale = request.GET.get('locale', document_locale)
document = get_object_or_404(Document.objects
.select_related('current_revision'),
locale=locale,
slug=document_slug)
if document.current_revision is None:
raise Http404
def get_previous(revisions):
for current_revision in revisions:
for previous_revision in revisions:
                # Skip revisions that aren't approved (matching what the
                # get_previous method does) and skip comparing a revision
                # against itself.
if (not previous_revision.is_approved or
current_revision.pk == previous_revision.pk):
continue
# we stick to the first revision that we find
if previous_revision.created < current_revision.created:
current_revision.previous_revision = previous_revision
break
return revisions
per_page = request.GET.get('limit', 10)
if not request.user.is_authenticated() and per_page == 'all':
return render(request, '403.html',
{'reason': 'revisions_login_required'}, status=403)
# Grab revisions, but defer summary and content because they can lead to
# attempts to cache more than memcached allows.
revisions = MultiQuerySet(
(Revision.objects.filter(pk=document.current_revision.pk)
.prefetch_related('creator', 'document')
.transform(get_previous)),
(Revision.objects.filter(document=document)
.order_by('-created', '-id')
.exclude(pk=document.current_revision.pk)
.prefetch_related('creator', 'document')
.transform(get_previous))
)
if not revisions.exists():
raise Http404
if per_page == 'all':
page = None
else:
try:
per_page = int(per_page)
except ValueError:
per_page = DOCUMENTS_PER_PAGE
page = paginate(request, revisions, per_page)
revisions = page.object_list
context = {
'revisions': revisions,
'document': document,
'page': page,
}
return render(request, 'wiki/document_revisions.html', context)
@require_GET
@xframe_options_sameorigin
@process_document_path
@prevent_indexing
def compare_revisions(request, document_slug, document_locale):
"""Compare two wiki document revisions.
The ids are passed as query string parameters (to and from).
"""
locale = request.GET.get('locale', document_locale)
doc = get_object_or_404(Document,
locale=locale,
slug=document_slug)
if 'from' not in request.GET or 'to' not in request.GET:
raise Http404
try:
from_id = smart_int(request.GET.get('from'))
to_id = smart_int(request.GET.get('to'))
    except Exception:
# Punt any errors in parameter handling to a 404
raise Http404
revision_from = get_object_or_404(Revision, id=from_id, document=doc)
revision_to = get_object_or_404(Revision, id=to_id, document=doc)
context = {
'document': doc,
'revision_from': revision_from,
'revision_to': revision_to,
}
if request.GET.get('raw', False):
template = 'wiki/includes/revision_diff_table.html'
else:
template = 'wiki/compare_revisions.html'
return render(request, template, context)
@login_required
@process_document_path
def select_locale(request, document_slug, document_locale):
"""Select a locale to translate the document to."""
doc = get_object_or_404(Document,
locale=document_locale,
slug=document_slug)
return render(request, 'wiki/select_locale.html', {'document': doc})
@require_http_methods(['GET', 'POST'])
@login_required
@process_document_path
@check_readonly
@prevent_indexing
@never_cache
def translate(request, document_slug, document_locale, revision_id=None):
"""
Create a new translation of a wiki document.
* document_slug is for the default locale
* translation is to the request locale
"""
# TODO: Refactor this view into two views? (new, edit)
# That might help reduce the headache-inducing branchiness.
parent_doc = get_object_or_404(Document,
locale=settings.WIKI_DEFAULT_LANGUAGE,
slug=document_slug)
user = request.user
if not revision_id:
# HACK: Seems weird, but sticking the translate-to locale in a query
# param is the best way to avoid the MindTouch-legacy locale
# redirection logic.
document_locale = request.GET.get('tolocale',
document_locale)
# Set a "Discard Changes" page
discard_href = ''
if settings.WIKI_DEFAULT_LANGUAGE == document_locale:
# Don't translate to the default language.
return HttpResponseRedirect(reverse(
'wiki.edit_document', locale=settings.WIKI_DEFAULT_LANGUAGE,
args=[parent_doc.slug]))
if not parent_doc.is_localizable:
message = _lazy(u'You cannot translate this document.')
context = {'message': message}
return render(request, 'handlers/400.html', context, status=400)
if revision_id:
get_object_or_404(Revision, pk=revision_id)
based_on_rev = parent_doc.current_or_latest_revision()
disclose_description = bool(request.GET.get('opendescription'))
try:
        doc = parent_doc.translations.get(locale=document_locale)
"""Sorted List
==============
:doc:`Sorted Containers<index>` is an Apache2 licensed Python sorted
collections library, written in pure-Python, and fast as C-extensions. The
:doc:`introduction<introduction>` is the best way to get started.
Sorted list implementations:
.. currentmodule:: sortedcontainers
* :class:`SortedList`
* :class:`SortedKeyList`
"""
# pylint: disable=too-many-lines
from __future__ import print_function
from bisect import bisect_left, bisect_right, insort
from itertools import chain, repeat, starmap
from math import log
from operator import add, eq, ne, gt, ge, lt, le, iadd
from textwrap import dedent
from typing import Any, Callable
from collections.abc import Sequence, MutableSequence
from functools import wraps
from sys import hexversion
if hexversion < 0x03000000:
from itertools import imap as map # type: ignore
from itertools import izip as zip # type: ignore
try:
from thread import get_ident
except ImportError:
from dummy_thread import get_ident
else:
from functools import reduce
try:
from _thread import get_ident
except ImportError:
from _dummy_thread import get_ident
def _make_cmp(seq_op: Callable[[Any, Any], bool], symbol: str, doc: str) -> Callable:
"Make comparator method."
def comparer(one: "SortedList", other: "SortedList") -> bool:
"Compare method for sorted list and sequence."
if not isinstance(other, Sequence):
return NotImplemented
one_len = one._len
len_other = len(other)
if one_len != len_other:
if seq_op is eq:
return False
if seq_op is ne:
return True
for alpha, beta in zip(one, other):
if alpha != beta:
return seq_op(alpha, beta)
return seq_op(one_len, len_other)
seq_op_name = seq_op.__name__
comparer.__name__ = '__{0}__'.format(seq_op_name)
doc_str = """Return true if and only if sorted list is {0} `other`.
``sl.__{1}__(other)`` <==> ``sl {2} other``
Comparisons use lexicographical order as with sequences.
Runtime complexity: `O(n)`
:param other: `other` sequence
:return: true if sorted list is {0} `other`
"""
comparer.__doc__ = dedent(doc_str.format(doc, seq_op_name, symbol))
return comparer
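# The factory above builds the rich-comparison methods. Elsewhere in the module
# (outside this excerpt) they are presumably bound onto the class along these
# lines -- shown here only as a sketch:
#
#     __eq__ = _make_cmp(eq, '==', 'equal to')
#     __ne__ = _make_cmp(ne, '!=', 'not equal to')
#     __lt__ = _make_cmp(lt, '<', 'less than')
#     __gt__ = _make_cmp(gt, '>', 'greater than')
#     __le__ = _make_cmp(le, '<=', 'less than or equal to')
#     __ge__ = _make_cmp(ge, '>=', 'greater than or equal to')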
def recursive_repr(fillvalue='...'):
"Decorator to make a repr function return fillvalue for a recursive call."
# pylint: disable=missing-docstring
# Copied from reprlib in Python 3
# https://hg.python.org/cpython/file/3.6/Lib/reprlib.py
def decorating_function(user_function):
repr_running = set()
@wraps(user_function)
def wrapper(self):
key = id(self), get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
result = user_function(self)
finally:
repr_running.discard(key)
return result
return wrapper
return decorating_function
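# Typical use of recursive_repr (a sketch, mirroring reprlib.recursive_repr in
# Python 3): guard a __repr__ that would otherwise recurse forever if a
# container ended up containing itself:
#
#     @recursive_repr()
#     def __repr__(self):
#         return '{0}({1!r})'.format(type(self).__name__, list(self))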
# END Python 2/3 Shims
class SortedList(MutableSequence):
"""Sorted list is a sorted mutable sequence.
Sorted list values are maintained in sorted order.
Sorted list values must be comparable. The total ordering of values must
not change while they are stored in the sorted list.
Methods for adding values:
* :func:`SortedList.add`
* :func:`SortedList.update`
* :func:`SortedList.__add__`
* :func:`SortedList.__iadd__`
* :func:`SortedList.__mul__`
* :func:`SortedList.__imul__`
Methods for removing values:
* :func:`SortedList.clear`
* :func:`SortedList.discard`
* :func:`SortedList.remove`
* :func:`SortedList.pop`
* :func:`SortedList.__delitem__`
Methods for looking up values:
* :func:`SortedList.bisect_left`
* :func:`SortedList.bisect_right`
* :func:`SortedList.count`
* :func:`SortedList.index`
* :func:`SortedList.__contains__`
* :func:`SortedList.__getitem__`
Methods for iterating values:
* :func:`SortedList.irange`
* :func:`SortedList.islice`
* :func:`SortedList.__iter__`
* :func:`SortedList.__reversed__`
Methods for miscellany:
* :func:`SortedList.copy`
* :func:`SortedList.__len__`
* :func:`SortedList.__repr__`
* :func:`SortedList._check`
* :func:`SortedList._reset`
Sorted lists use lexicographical ordering semantics when compared to other
sequences.
Some methods of mutable sequences are not supported and will raise
not-implemented error.
"""
DEFAULT_LOAD_FACTOR = 1000
def __init__(self, iterable=None, key=None):
"""Initialize sorted list instance.
Optional `iterable` argument provides an initial iterable of values to
initialize the sorted list.
Runtime complexity: `O(n*log(n))`
>>> sl = SortedList()
>>> sl
SortedList([])
>>> sl = SortedList([3, 1, 2, 5, 4])
>>> sl
SortedList([1, 2, 3, 4, 5])
:param iterable: initial values (optional)
"""
assert key is None
self._len = 0
self._load = self.DEFAULT_LOAD_FACTOR
self._lists = []
self._maxes = []
self._index = []
self._offset = 0
if iterable is not None:
self._update(iterable)
def __new__(cls, iterable=None, key=None):
"""Create new sorted list or sorted-key list instance.
Optional `key`-function argument will return an instance of subtype
:class:`SortedKeyList`.
>>> sl = SortedList()
>>> isinstance(sl, SortedList)
True
>>> sl = SortedList(key=lambda x: -x)
>>> isinstance(sl, SortedList)
True
>>> isinstance(sl, SortedKeyList)
True
:param iterable: initial values (optional)
:param key: function used to extract comparison key (optional)
:return: sorted list or sorted-key list instance
"""
# pylint: disable=unused-argument
if key is None:
return object.__new__(cls)
else:
if cls is SortedList:
return object.__new__(SortedKeyList)
else:
raise TypeError('inherit SortedKeyList for key argument')
@property
def key(self): # pylint: disable=useless-return
"""Function used to extract comparison key from values.
Sorted list compares values directly so the key function is none.
"""
return None
def _reset(self, load):
"""Reset sorted list load factor.
The `load` specifies the load-factor of the list. The default load
factor of 1000 works well for lists from tens to tens-of-millions of
values. Good practice is to use a value that is the cube root of the
list size. With billions of elements, the best load factor depends on
your usage. It's best to leave the load factor at the default until you
start benchmarking.
See :doc:`implementation` and :doc:`performance-scale` for more
information.
Runtime complexity: `O(n)`
:param int load: load-factor for sorted list sublists
"""
values = reduce(iadd, self._lists, [])
self._clear()
self._load = load
self._update(values)
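    # Example of the cube-root guidance in the docstring above (illustrative):
    # a sorted list holding around one million values would use a load factor
    # of roughly round(1e6 ** (1.0 / 3)) == 100:
    #
    #     sl = SortedList(range(1000000))
    #     sl._reset(100)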
def clear(self):
"""Remove all values from sorted list.
Runtime complexity: `O(n)`
"""
self._len = 0
del self._lists[:]
del self._maxes[:]
del self._index[:]
self._offset = 0
_clear = clear
def add(self, value):
"""Add `value` to sorted list.
Runtime complexity: `O(log(n))` -- approximate.
>>> sl = SortedList()
>>> sl.add(3)
>>> sl.add(1)
>>> sl.add(2)
>>> sl
SortedList([1, 2, 3])
:param value: value to add to sorted list
"""
_lists = self._lists
_maxes = self._maxes
if _maxes:
pos = bisect_right(_maxes, value)
if pos == len(_maxes):
pos -= 1
_lists[pos].append(value)
_maxes[pos] = value
else:
insort(_lists[pos], value)
self._expand(pos)
else:
_lists.append([value])
_maxes.append(value)
self._len += 1
def _expand(self, pos):
"""Split sublists with length greater than double the load-factor.
Updates the index when the sublist length is less than double the load
level. This requires incrementing the nodes in a traversal from the
leaf node to the root. For an example traversal see
``SortedList._loc``.
"""
_load = self._load
_lists = self._lists
_index = self._index
if len(_lists[pos]) > (_load << 1):
_maxes = self._maxes
_lists_pos = _lists[pos]
half = _lists_pos[_load:]
del _lists_pos[_load:]
_maxes[pos] = _lists_pos[-1]
_lists.insert(pos + 1, half)
_maxes.insert(pos + 1, half[-1])
del _index[:]
else:
if _index:
child = self._offset + pos
while child:
_index[child] += 1
child = (child - 1) >> 1
_index[0] += 1
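    # Worked example of the split above (illustrative): with _load = 4 a
    # sublist splits only once its length exceeds 8, so a sublist that grows to
    # [1, 2, 3, 4, 5, 6, 7, 8, 9] becomes [1, 2, 3, 4] and [5, 6, 7, 8, 9];
    # the positional index is discarded and rebuilt lazily on the next lookup.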
def update(self, iterable):
"""Update sorted list by adding all values from `iterable`.
Runtime complexity: `O(k*log(n))` -- approximate.
>>> sl = SortedList()
>>> sl.update([3, 1, 2])
>>> sl
SortedList([1, 2, 3])
:param iterable: iterable of values to add
"""
_lists = self._lists
_maxes = self._maxes
values = sorted(iterable)
if _maxes:
if len(values) * 4 >= self._len:
values.extend(chain.from_iterable(_lists))
values.sort()
self._clear()
else:
_add = self.add
for val in values:
_add(val)
return
_load = self._load
_lists.extend(values[pos:(pos + _load)]
for pos in range(0, len(values), _load))
_maxes.extend(sublist[-1] for sublist in _lists)
self._len = len(values)
del self._index[:]
_update = update
def __contains__(self, value):
"""Return true if `value` is an element of the sorted list.
``sl.__contains__(value)`` <==> ``value in sl``
Runtime complexity: `O(log(n))`
>>> sl = SortedList([1, 2, 3, 4, 5])
>>> 3 in sl
True
:param value: search for value in sorted list
:return: true if `value` in sorted list
"""
_maxes = self._maxes
if not _maxes:
return False
pos = bisect_left(_maxes, value)
if pos == len(_maxes):
return False
_lists = self._lists
idx = bisect_left(_lists[pos], value)
return _lists[pos][idx] == value
def discard(self, value):
"""Remove `value` from sorted list if it is a member.
If `value` is not a member, do nothing.
Runtime complexity: `O(log(n))` -- approximate.
>>> sl = SortedList([1, 2, 3, 4, 5])
>>> sl.discard(5)
>>> sl.discard(0)
>>> sl == [1, 2, 3, 4]
True
:param value: `value` to discard from sorted list
"""
_maxes = self._maxes
if not _maxes:
return
pos = bisect_left(_maxes, value)
if pos == len(_maxes):
return
_lists = self._lists
idx = bisect_left(_lists[pos], value)
if _lists[pos][idx] == value:
self._delete(pos, idx)
def remove(self, value):
"""Remove `value` from sorted list; `value` must be a member.
If `value` is not a member, raise ValueError.
Runtime complexity: `O(log(n))` -- approximate.
>>> sl = SortedList([1, 2, 3, 4, 5])
>>> sl.remove(5)
>>> sl == [1, 2, 3, 4]
True
>>> sl.remove(0)
Traceback (most recent call last):
...
ValueError: 0 not in list
:param value: `value` to remove from sorted list
:raises ValueError: if `value` is not in sorted list
"""
_maxes = self._maxes
if not _maxes:
raise ValueError('{0!r} not in list'.format(value))
pos = bisect_left(_maxes, value)
if pos == len(_maxes):
raise ValueError('{0!r} not in list'.format(value))
_lists = self._lists
idx = bisect_left(_lists[pos], value)
if _lists[pos][idx] == value:
self._delete(pos, idx)
else:
raise ValueError('{0!r} not in list'.format(value))
def _delete(self, pos, idx):
"""Delete value at the given `(pos, idx)`.
Combines lists that are less than half the load level.
Updates the index when the sublist length is more than half the load
level. This requires decrementing the nodes in a traversal from the
leaf node to the root. For an example traversal see
``SortedList._loc``.
:param int pos: lists index
:param int idx: sublist index
"""
_lists = self._lists
_maxes = self._maxes
_index = self._index
_lists_pos = _lists[pos]
del _lists_pos[idx]
self._len -= 1
len_lists_pos = len(_lists_pos)
if len_lists_pos > (self._load >> 1):
_maxes[pos] = _lists_pos[-1]
if _index:
child = self._offset + pos
while child > 0:
_index[child] -= 1
child = (child - 1) >> 1
_index[0] -= 1
elif len(_lists) > 1:
if not pos:
pos += 1
prev = pos - 1
_lists[prev].extend(_lists[pos])
_maxes[prev] = _lists[prev][-1]
del _lists[pos]
del _maxes[pos]
del _index[:]
self._expand(prev)
elif len_lists_pos:
_maxes[pos] = _lists_pos[-1]
else:
del _lists[pos]
del _maxes[pos]
del _index[:]
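    # Illustrative counterpart to _expand: with the default _load of 1000 a
    # sublist is kept while it still holds more than 500 values; once a
    # deletion drops it to 500 or fewer (and other sublists exist) it is merged
    # with an adjacent sublist, and _expand() re-splits the result if the merge
    # made it longer than 2 * _load.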
def _loc(self, pos, idx):
"""Convert an index pair (lists index, sublist index) into a single
index number that corresponds to the position of the value in the
sorted list.
Many queries require the index be built. Details of the index are
described in ``SortedList._build_index``.
Indexing requires traversing the tree from a leaf node to the root. The
parent of each node is easily computable at ``(pos - 1) // 2``.
Left-child nodes are always at odd indices and right-child nodes are
always at even indices.
When traversing up from a right-child node, increment the total by the
left-child node.
The final index is the sum from traversal and the index in the sublist.
For example, using the index from ``SortedList._build_index``::
_index = 14 5 9 3 2 4 5
_offset = 3
Tree::
14
5 9
3 2 4 5
Converting an index pair (2, 3) into a single index involves iterating
like so:
        1. Starting at the leaf node: offset + pos = 3 + 2 = 5. We identify
the node as a left-child node. At such nodes, we simply traverse to
the parent.
2. At node 9, position 2, we recognize the node as a right-child node
and accumulate the left-child in our total. Total is now 5 and we
traverse to the parent at position 0.
3. Iteration ends at the root.
The index is then the sum of the total and sublist index: 5 + 3 = 8.
:param int pos: lists index
:param int idx: sublist index
:return: index in sorted list
"""
if not pos:
return idx
_index = self._index
if not _index:
self._build_index()
total = 0
# Increment pos to point in the index to len(self._lists[pos]).
pos += self._offset
# Iterate until reaching the root of the index tree at pos = 0.
while pos:
            # Right-child nodes are at even indices. At such indices
            # account the total below the left child node.
if not pos & 1:
total += _index[pos - 1]
# Advance pos to the parent node.
pos = (pos - 1) >> 1
return total + idx
def _pos(self, idx):
"""Convert an index into an index pair (lists index, sublist index)
that can be used to access the corresponding lists position.
Many queries require the index be built. Details of the index are
described in ``SortedList._build_index``.
Indexing requires traversing the tree to a leaf node. Each node has two
children which are easily computable. Given an index, pos, the
left-child is at ``pos * 2 + 1`` and the right-child is at ``pos * 2 +
2``.
When the index is less than the left-child, traversal moves to the
left sub-tree. Otherwise, the index is decremented by the left-child
and traversal moves to the right sub-tree.
At a child node, the indexing pair is computed from the relative
position of the child node as compared with the offset and the remaining
index.
For example, using the index from ``SortedList._build_index``::
_index = 14 5 9 3 2 4 5
_offset = 3
Tree::
14
5 9
3 2 4 5
Indexing position 8 involves iterating like so:
1. Starting at the root, position 0, 8 is compared with the left-child
node (5) which it is greater than. When greater the index is
decremented and the position is updated to the right child node.
2. At node 9 with index 3, we again compare the index to the left-child
           node with value 4. Because the index is less than the left-child
node, we simply traverse to the left.
3. At node 4 with index 3, we recognize that we are at a leaf node and
stop iterating.
4. To compute the sublist index, we subtract the offset from the index
of the leaf node: 5 - 3 = 2. To compute the index in the sublist, we
simply use the index remaining from iteration. In this case, 3.
The final index pair from our example is (2, 3) which corresponds to
index 8 in the sorted list.
:param int idx: index in sorted list
:return: (lists index, sublist index) pair
"""
if idx < 0:
last_len = len(self._lists[-1])
if (-idx) <= last_len:
return len(self._lists) - 1, last_len + idx
idx += self._len
if idx < 0:
raise IndexError('list index out of range')
elif idx >= self._len:
raise IndexError('list index out of range')
if idx < len(self._lists[0]):
return 0, idx
_index = self._index
if not _index:
self._build_index()
pos = 0
child = 1
len_index = len(_index)
while child < len_index:
index_child = _index[child]
if idx < index_child:
pos = child
else:
idx -= index_child
pos = child + 1
child = (pos << 1) + 1
return (pos - self._offset, idx)
def _build_index(self):
"""Build a positional index for indexing the sorted list.
Indexes are represented as binary trees in a dense array notation
similar to a binary heap.
For example, given a lists representation storing integers::
0: [1, 2, 3]
1: [4, 5]
2: [6, 7, 8, 9]
3: [10, 11, 12, 13, 14]
The first transformation maps the sub-lists by their length. The
first row of the index is the length of the sub-lists::
0: [3, 2, 4, 5]
Each row after that is the sum of consecutive pairs of the previous
row::
1: [5, 9]
2: [14]
Finally, the index is built by concatenating these lists together::
_index = [14, 5, 9, 3, 2, 4, 5]
An offset storing the start of the first row is also stored::
_offset = 3
When built, the index can be used for efficient indexing into the list.
See the comment and notes on ``SortedList._pos`` for details.
"""
row0 = list(map(len, self._lists))
if len(row0) == 1:
self._index[:] = row0
self._offset = 0
return
head = iter(row0)
tail = iter(head)
row1 = list(starmap(add, zip(head, tail)))
if len(row0) & 1:
row1.append(row0[-1])
if len(row1) == 1:
self._index[:] = row1 + row0
self._offset = 1
return
size = 2 ** (int(log(len(row1) - 1, 2)) + 1)
row1.extend(repeat(0, size - len(row1)))
tree = [row0, row1]
while len(tree[-1]) > 1:
head = iter(tree[-1])
tail = iter(head)
row = list(starmap(add, zip(head, tail)))
tree.append(row)
reduce(iadd, reversed(tree), self._index)
self._offset = size * 2 - 1
def __delitem__(self, index):
"""Remove value at `index` from sorted list.
``sl.__delitem__(index)`` <==> ``del sl[index]``
Supports slicing.
Runtime complexity: `O(log(n))` -- approximate.
>>> sl = SortedList('abcde')
>>> del sl[2]
>>> sl
SortedList(['a', 'b', 'd', 'e'])
>>> del sl[:2]
>>> sl
SortedList(['d', 'e'])
:param index: integer or slice for indexing
:raises IndexError: if index out of range
"""
if isinstance(index, slice):
start, stop, step = index.indices(self._len)
if step == 1 and start < stop:
if start == 0 and stop == self._len:
return self._clear()
elif self._len <= 8 * (stop - start):
values = self._getitem(slice(None, start))
if stop < self._len:
values += self._getitem(slice(stop, None))
self._clear()
return self._update(values)
indices = range(start, stop, step)
# Delete items from greatest index to least so
# that the indices remain valid throughout iteration.
if step > 0:
indices = reversed(indices)
_pos, _delete = self._pos, self._delete
for index in indices:
pos, idx = _pos(index)
_delete(pos, idx)
else:
pos, idx = self._pos(index)
self._delete(pos, idx)
def __getitem__(self, index):
"""Lookup value at `index` in sorted list.
``sl.__getitem__(index)`` <==> ``sl[index]``
Supports slicing.
Runtime complexity: `O(log(n))` -- approximate.
>>> sl = SortedList('abcde')
>>> sl[1]
'b'
>>> sl[-1]
'e'
>>> sl[2:5]
['c', 'd', 'e']
:param index: integer or slice for indexing
:return: value or list of values
:raises IndexError: if index out of range
"""
_lists = self._lists
if isinstance(index, slice):
start, stop, step = index.indices(self._len)
if step == 1 and start < stop:
if start == 0 and stop == self._len:
return reduce(iadd, self._lists, [])
start_pos, start_idx = self._pos(start)
if stop == self._len:
stop_pos = len(_lists) - 1
stop_idx = len(_lists[stop_pos])
else:
stop_pos, stop_idx = self._pos(stop)
if start_pos == stop_pos:
return _lists[start_pos][start_idx:stop_idx]
prefix = _lists[start_pos][start_idx:]
middle = _lists[(start_pos + 1):stop_pos]
result = reduce(iadd, middle, prefix)
result += _lists[stop_pos][:stop_idx]
return result
if step == -1 and start > stop:
result = self._getitem(slice(stop + 1, start + 1))
result.reverse()
return result
# Return a list because a negative step could
# reverse the order of the items and this could
# be the desired behavior.
indices = range(start, stop, step)
return list(self._getitem(index) for index in indices)
else:
if self._len:
if index == 0:
return _lists[0][0]
elif index == -1:
return _lists[-1][-1]
else:
raise IndexError('list index out of range')
if 0 <= index < len(_lists[0]):
return _lists[0][index]
len_last = len(_lists[-1])
if -len_last < index < 0:
return _lists[-1][len_last + index]
pos, idx = self._pos(index)
return _lists[pos][idx]
_getitem = __getitem__
def __setitem__(self, index, value):
"""Raise not-implemented error.
``sl.__setitem__(index, value)`` <==> ``sl[index] = value``
:raises NotImplementedError: use ``del sl[index]`` and
``sl.add(value)`` instead
"""
message = 'use ``del sl[index]`` and ``sl.add(value)`` instead'
raise NotImplementedError(message)
def __iter__(self):
"""Return an iterator over the sorted list.
``sl.__iter__()`` <==> ``iter(sl)``
Iterating the sorted list while adding or deleting values may raise a
:exc:`RuntimeError` or fail to iterate over all values.
"""
return chain.from_iterable(self._lists)
def __reversed__(self):
"""Return a reverse iterator over the sorted list.
``sl.__reversed__()`` <==> ``reversed(sl)``
Iterating the sorted list while adding or deleting values may raise a
:exc:`RuntimeError` or fail to iterate over all values.
"""
return chain.from_iterable(map(reversed, reversed(self._lists)))
def reverse(self):
"""Raise not-implemented error.
Sorted list maintains values in ascending sort order. Values may not be
reversed in-place.
Use ``reversed(sl)`` for an iterator over values in descending sort
order.
Implemented to override `MutableSequence.reverse` which provides an
erroneous default implementation.
:raises NotImplementedError: use ``reversed(sl)`` instead
"""
raise NotImplementedError('use ``reversed(sl)`` instead')
def islice(self, start=None, stop=None, reverse=False):
"""Return an iterator that slices sorted list from `start` to `stop`.
        The `start` and `stop` indices are treated as inclusive and exclusive,
        respectively.
Both `start` and `stop` default to `None` which is automatically
inclusive of the beginning and end of the sorted list.
When `reverse` is `True` the values are yielded from the iterator in
reverse order; `reverse` defaults to `False`.
>>> sl = SortedList('abcdefghij')
>>> it = sl.islice(2, 6)
>>> list(it)
['c', 'd', 'e', 'f']
:param int start: start index (inclusive)
:param int stop: stop index (exclusive)
:param bool reverse: yield values in reverse order
:return: iterator
"""
_len = self._len
if not _len:
return iter(())
start, stop, _ = slice(start, stop).indices(self._len)
if start >= stop:
return iter(())
_pos = self._pos
min_pos, min_idx = _pos(start)
if stop == _len:
max_pos = len(self._lists) - 1
max_idx = len(self._lists[-1])
else:
max_pos, max_idx = _pos(stop)
return self._islice(min_pos, min_idx, max_pos, max_idx, reverse)
def _islice(self, min_pos, min_idx, max_pos, max_idx, reverse):
"""Return an iterator that slices sorted list using two index pairs.
The index pairs are (min_pos, min_idx) and (max_pos, max_idx), the
first inclusive and the latter exclusive. See `_pos` for details on how
an index is converted to an index pair.
When `reverse` is `True`, values are yielded from the iterator in
reverse order.
"""
_lists = self._lists
if min_pos > max_pos:
return iter(())
if min_pos == max_pos:
if reverse:
indices = reversed(range(min_idx, max_idx))
return map(_lists[min_pos].__getitem__, indices)
indices = range(min_idx, max_idx)
return map(_lists[min_pos].__getitem__, indices)
next_pos = min_pos + 1
if next_pos == max_pos:
if reverse:
min_indices = range(min_idx, len(_lists[min_pos]))
max_indices = range(max_idx)
return chain(
map(_lists[max_pos].__getitem__, reversed(max_indices)),
map(_lists[min_pos].__getitem__, reversed(min_indices)),
)
min_indices = range(min_idx, len(_lists[min_pos]))
max_indices = range(max_idx)
return chain(
map(_lists[min_pos].__getitem__, min_indices),
map(_lists[max_pos].__getitem__, max_indices),
)
if reverse:
min_indices = range(min_idx, len(_lists[min_pos]))
sublist_indices = range(next_pos, max_pos)
sublists = map(_lists.__getitem__, reversed(sublist_indices))
max_indices = range(max_idx)
return chain(
map(_lists[max_pos].__getitem__, reversed(max_indices)),
chain.from_iterable(map(reversed, sublists)),
map(_lists[min_pos].__getitem__, reversed(min_indices)),
)
min_indices = range(min_idx, len(_lists[min_pos]))
sublist_indices = range(next_pos, max_pos)
sublists = map(_lists.__getitem__, sublist_indices)
max_indices = range(max_idx)
return chain(
map(_lists[min_pos].__getitem__, min_indices),
chain.from_iterable(sublists),
map(_lists[max_pos].__getitem__, max_indices),
)
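    # Editor's note -- an illustrative sketch using the sample sub-lists from
    # ``_build_index``: with _lists = [[1, 2, 3], [4, 5], [6, 7, 8, 9], ...],
    # _islice(min_pos=1, min_idx=1, max_pos=2, max_idx=3, reverse=False) takes the
    # adjacent-sublists branch and yields 5, 6, 7, 8 -- the tail of _lists[1]
    # chained with the first max_idx items of _lists[2].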
def irange(self, minimum=None, maximum=None, inclusive=(True, True),
reverse=False):
"""Create an iterator of values between `minimum` and `maximum`.
Both `minimum` and `maximum` default to `None` which is automatically
inclusive of the beginning and end of the sorted list.
The argument `inclusive` is a pair of booleans that indicates whether
the minimum and maximum ought to be included in the range,
respectively. The default is ``(True, True)`` such that the range is
inclusive of both minimum and maximum.
When `reverse` is `True` the values are yielded from the iterator in
reverse order; `reverse` defaults to `False`.
>>> sl = SortedList('abcdefghij')
>>> it = sl.irange('c', 'f')
>>> list(it)
['c', 'd', 'e', 'f']
:param minimum: minimum value to start iterating
:param maximum: maximum value to stop iterating
:param inclusive: pair of booleans
:param bool reverse: yield values in reverse order
:return: iterator
"""
_maxes = self._maxes
if not _maxes:
return iter(())
_lists = self._lists
# Calculate the minimum (pos, idx) pair. By default this location
# will be inclusive in our calculation.
if minimum is None:
min_pos = 0
min_idx = 0
else:
if inclusive[0]:
min_pos = bisect_left(_maxes, minimum)
if min_pos == len(_maxes):
return iter(())
min_idx = bisect_left(_lists[min_pos], minimum)
else:
min_pos = bisect_right(_maxes, minimum)
if min_pos == len(_maxes):
return iter(())
min_idx = bisect_right(_lists[min_pos], minimum)
# Calculate the maximum (pos, idx) pair. By default this location
# will be exclusive in our calculation.
if maximum is None:
max_pos = len(_maxes) - 1
max_idx = len(_lists[max_pos])
else:
if inclusive[1]:
max_pos = bisect_right(_maxes, maximum)
if max_pos == len(_maxes):
max_pos -= 1
max_idx = len(_lists[max_pos])
else:
max_idx = bisect_right(_lists[max_pos], maximum)
else:
max_pos = bisect_left(_maxes, maximum)
if max_pos == len(_maxes):
max_pos -= 1
max_idx = len(_lists[max_pos])
else:
max_idx = bisect_left(_lists[max_pos], maximum)
return self._islice(min_pos, min_idx, max_pos, max_idx, reverse)
def __len__(self):
"""Return the size of the sorted list.
``sl.__len__()`` <==> ``len(sl)``
:return: size of sorted list
"""
return self._len
def bisect_left(self, value):
"""Return an index to insert `value` in the sorted list.
If the `value` is already present, the insertion point will be before
(to the left of) any existing values.
Similar to the `bisect` module in the standard library.
Runtime complexity: `O(log(n))` -- approximate.
>>> sl = SortedList([10, 11, 12, 13, 14])
>>> sl.bisect_left(12)
2
:param value: insertion index of value in sorted list
:return: index
"""
_maxes = self._maxes
if not _maxes:
return 0
pos = bisect_left(_maxes, value)
if pos == len(_maxes):
return self._len
idx = bisect_left(self._lists[pos], value)
return self._loc(pos, idx)
def bisect_right(self, value):
"""Return an index to insert `value` in the sorted list.
Similar to `bisect_left`, but if `value` is already present, the
        insertion point will be after (to the right of) any existing values.
Similar to the `bisect` module in the standard library.
Runtime complexity: `O(log(n))` -- approximate.
>>> sl = SortedList([10, 11, 12, 13, 14])
>>> sl.bisect_right(12)
3
:param value: insertion index of value in sorted list
:return: index
"""
_maxes = self._maxes
if not _maxes:
return 0
pos = bisect_right(_maxes, value)
if pos == len(_maxes):
return self._len
idx = bisect_right(self._lists[pos], value)
return self._loc(pos, idx)
bisect = bisect_right
_bisect_right = bisect_right
def count(self, value):
"""Return number of occurrences of `value` in the sorted list.
Runtime complexity: `O(log(n))` -- approximate.
>>> sl = SortedList([1, 2, 2, 3, 3, 3, 4, 4, 4, 4])
>>> sl.count(3)
3
:param value: value to count in sorted list
:return: count
"""
_maxes = self._maxes
if not _maxes:
return 0
pos_left = bisect_left(_maxes, value)
if pos_left == len(_maxes):
return 0
_lists = self._lists
idx_left = bisect_left(_lists[pos_left], value)
pos_right = bisect_right(_maxes, value)
if pos_right == len(_maxes):
return self._len - self._loc(pos_left, idx_left)
idx_right = bisect_right(_lists[pos_right], value)
if pos_left == pos_right:
return idx_right - idx_left
right = self._loc(pos_right, idx_right)
left = self._loc(pos_left, idx_left)
return right - left
def copy(self):
"""Return a shallow copy of the sorted list.
Runtime complexity: `O(n)`
:return: new sorted list
"""
return self.__class__(self)
__copy__ = copy
def append(self, value):
"""Raise not-implemented error.
Implemented to override `MutableSequence.append` which provides an
erroneous default implementation.
:raises NotImplementedError: use ``sl.add(value)`` instead
"""
raise NotImplementedError('use ``sl.add(value)`` instead')
def extend(self, values):
"""Raise not-implemented error.
Implemented to override `MutableSequence.extend` which provides an
erroneous default implementation.
:raises NotImplementedError: use ``sl.update(values)`` instead
"""
raise NotImplementedError('use ``sl.update(values)`` instead')
def insert(self, index, value):
"""Raise not-implemented error.
:raises NotImplementedError: use ``sl.add(value)`` instead
"""
raise NotImplementedError('use ``sl.add(value)`` instead')
def pop(self, index=-1):
"""Remove and return value at `index` in sorted list.
Raise :exc:`IndexError` if the sorted list is empty or index is out of
range.
Negative indices are supported.
Runtime complexity: `O(log(n))` -- approximate.
>>> sl = SortedList('abcde')
>>> sl.pop()
'e'
>>> sl.pop(2)
'c'
>>> sl
SortedList(['a', 'b', 'd'])
:param int index: index of value (default -1)
:return: value
:raises IndexError: if index is out of range
"""
if not self._len:
raise IndexError('pop index out of range')
_lists = self._lists
if index == 0:
val = _lists[0][0]
self._delete(0, 0)
return val
if index == -1:
pos = len(_lists) - 1
loc = len(_lists[pos]) - 1
val = _lists[pos][loc]
self._delete(pos, loc)
return val
if 0 <= index < len(_lists[0]):
val = _lists[0][index]
self._delete(0, index)
return val
len_last = len(_lists[-1])
if -len_last < index < 0:
pos = len(_lists) - 1
loc = len_last + index
val = _lists[pos][loc]
self._delete(pos, loc)
return val
pos, idx = self._pos(index)
val = _lists[pos][idx]
self._delete(pos, idx)
return val
def index(self, value, start=None, stop=None):
"""Return first index of value in sorted list.
Raise ValueError if `value` is not present.
Index must be between `start` and `stop` for the `value` to be
considered present. The default value, None, for `start` and `stop`
        indicates the beginning and end of the sorted list.
Negative indices are supported.
Runtime complexity: `O(log(n))` -- approximate.
>>> sl = SortedList('abcde')
>>> sl.index('d')
3
>>> sl.index('z')
Traceback (most recent call last):
...
ValueError: 'z' is not in list
:param value: value in sorted list
:param int start: start index (default None, start of sorted list)
:param int stop: stop index (default None, end of sorted list)
:return: index of value
:raises ValueError: if value is not present
"""
_len = self._len
if not _len:
raise ValueError('{0!r} is not in list'.format(value))
if start is None:
start = 0
if start < 0:
start += _len
if start < 0:
start = 0
if stop is None:
stop = _len
if stop < 0:
stop += _len
if stop > _len:
stop = _len
if stop <= start:
raise ValueError('{0!r} is not in list'.format(value))
_maxes = self._maxes
pos_left = bisect_left(_maxes, value)
if pos_left == len(_maxes):
raise ValueError('{0!r} is not in list'.format(value))
_lists = self._lists
idx_left = bisect_left(_lists[pos_left], value)
if _lists[pos_left][idx_left] != value:
raise ValueError('{0!r} is not in list'.format(value))
stop -= 1
left = self._loc(pos_left, idx_left)
if start <= left:
if left <= stop:
return left
else:
right = self._bisect_right(value) - 1
if start <= right:
return start
raise ValueError('{0!r} is not in list'.format(value))
def __add__(self, other):
"""Return new sorted list containing all values in both sequences.
``sl.__add__(other)`` <==> ``sl + other``
Values in `other` do not need to be in sorted order.
Runtime complexity: `O(n*log(n))`
>>> sl1 = SortedList('bat')
>>> sl2 = SortedList('cat')
>>> sl1 + sl2
SortedList(['a', 'a', 'b', 'c', 't', 't'])
:param other: other iterable
:return: new sorted list
"""
values = reduce(iadd, self._lists, [])
values.extend(other)
return self.__class__(values)
__radd__ = __add__
def __iadd__(self, other):
"""Update sorted list with values from `other`.
``sl.__iadd__(other)`` <==> ``sl += other``
Values in `other` do not need to be in sorted order.
Runtime complexity: `O(k*log(n))` -- approximate.
>>> sl = SortedList('bat')
>>> sl += 'cat'
>>> sl
SortedList(['a', 'a', 'b', 'c', 't', 't'])
:param other: other iterable
:return: existing sorted list
"""
self._update(other)
return self
def __mul__(self, num):
"""Return new sorted list with `num` shallow copies of values.
``sl.__mul__(num)`` <==> ``sl * num``
Runtime complexity: `O(n*log(n))`
>>> sl = SortedList('abc')
>>> sl * 3
SortedList(['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'])
:param int num: count of shallow copies
:return: new sorted list
"""
values = reduce(iadd, self._lists, []) * num
return self.__class__(values)
__rmul__ = __mul__
def __imul__(self, num):
"""Update the sorted list with `num` shallow copies of values.
``sl.__imul__(num)`` <==> ``sl *= num``
Runtime complexity: `O(n*log(n))`
>>> sl = SortedList('abc')
>>> sl *= 3
>>> sl
SortedList(['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'])
:param int num: count of shallow copies
:return: existing sorted list
"""
values = reduce(iadd, self._lists, []) * num
self._clear()
self._update(values)
return self
__eq__ = _make_cmp(eq, '==', 'equal to')
__ne__ = _make_cmp(ne, '!=', 'not equal to')
__lt__ = _make_cmp(lt, '<', 'less than')
__gt__ = _make_cmp(gt, '>', 'greater than')
__le__ = _make_cmp(le, '<=', 'less than or equal to')
__ge__ = _make_cmp(ge, '>=', 'greater than or equal to')
__make_cmp = staticmethod(_make_cmp) # type: ignore
@recursive_repr()
def __repr__(self):
"""Return string representation of sorted list.
``sl.__repr__()`` <==> ``repr(sl)``
:return: string representation
"""
return '{0}({1!r})'.format(type(self).__name__, list(self))
def _check(self):
"""Check invariants of sorted list.
Runtime complexity: `O(n)`
"""
try:
assert self._load >= 4
assert len(self._maxes) == len(self._lists)
assert self._len == sum(len(sublist) for sublist in self._lists)
# Check all sublists are sorted.
for sublist in self._lists:
for pos in range(1, len(sublist)):
assert sublist[pos - 1] <= sublist[pos]
# Check beginning/end of sublists are sorted.
for pos in range(1, len(self._lists)):
assert self._lists[pos - 1][-1] <= self._lists[pos][0]
# Check _maxes index is the last value of each sublist.
for pos in range(len(self._maxes)):
assert self._maxes[pos] == self._lists[pos][-1]
# Check sublist lengths are less than double load-factor.
double = self._load << 1
assert all(len(sublist) <= double for sublist in self._lists)
# Check sublist lengths are greater than half load-factor for all
# but the last sublist.
half = self._load >> 1
for pos in range(0, len(self._lists) - 1):
assert len(self._lists[pos]) >= half
if self._index:
assert self._len == self._index[0]
assert len(self._index) == self._offset + len(self._lists)
# Check index leaf nodes equal length of sublists.
for pos in range(len(self._lists)):
leaf = self._index[self._offset + pos]
assert leaf == len(self._lists[pos])
# Check index branch nodes are the sum of their children.
for pos in range(self._offset):
child = (pos << 1) + 1
if child >= len(self._index):
assert self._index[pos] == 0
elif child + 1 == len(self._index):
assert self._index[pos] == self._index[child]
else:
child_sum = self._index[child] + self._index[child + 1]
assert child_sum == self._index[pos]
except:
import sys
import traceback
traceback.print_exc(file=sys.stdout)
print('len', self._len)
print('load', self._load)
print('offset', self._offset)
print('len_index', len(self._index))
print('index', self._index)
print('len_maxes', len(self._maxes))
print('maxes', self._maxes)
print('len_lists', len(self._lists))
print('lists', self._lists)
raise
def identity(value):
"Identity function."
return value
class SortedKeyList(SortedList):
"""Sorted-key list is a subtype of sorted list.
The sorted-key list maintains values in comparison order based on the
result of a key function applied to every value.
All the same methods that are available in :class:`SortedList` are also
available in :class:`SortedKeyList`.
Additional methods provided:
* :attr:`SortedKeyList.key`
* :func:`SortedKeyList.bisect_key_left`
* :func:`SortedKeyList.bisect_key_right`
* :func:`SortedKeyList.irange_key`
Some examples below use:
>>> from operator import neg
>>> neg
<built-in function neg>
>>> neg(1)
-1
"""
def __init__(self, iterable=None, key=identity):
"""Initialize sorted-key list instance.
Optional `iterable` argument provides an initial iterable of values to
initialize the sorted-key list.
Optional `key` argument defines a callable that, like the `key`
argument to Python's `sorted` function, extracts a comparison key from
each value. The default is the identity function.
Runtime complexity: `O(n*log(n))`
>>> from operator import neg
>>> skl = SortedKeyList(key=neg)
>>> skl
SortedKeyList([], key=<built-in function neg>)
>>> skl = SortedKeyList([3, 1, 2], key=neg)
>>> skl
SortedKeyList([3, 2, 1], key=<built-in function neg>)
:param iterable: initial values (optional)
:param key: function used to extract comparison key (optional)
"""
self._key = key
self._len = 0
self._load = self.DEFAULT_LOAD_FACTOR
self._lists = []
self._keys = []
self._maxes = []
self._index = []
self._offset = 0
if iterable is not None:
self._update(iterable)
def __new__(cls, iterable=None, key=identity):
return object.__new__(cls)
@property
def key(self):
"Function used to extract comparison key from values."
return self._key
def clear(self):
"""Remove all values from sorted-key list.
Runtime complexity: `O(n)`
"""
self._len = 0
del self._lists[:]
del self._keys[:]
del self._maxes[:]
del self._index[:]
_clear = clear
def add(self, value: Any) -> None:
"""Add `value` to sorted-key list.
Runtime complexity: `O(log(n))` -- approximate.
>>> from operator import neg
>>> skl = SortedKeyList(key=neg)
>>> skl.add(3)
>>> skl.add(1)
>>> skl.add(2)
>>> skl
SortedKeyList([3, 2, 1], key=<built-in function neg>)
:param value: value to add to sorted-key list
"""
_lists = self._lists
_keys = self._keys
_maxes = self._maxes
key = self._key(value)
if _maxes:
pos = bisect_right(_maxes, key)
if pos == len(_maxes):
pos -= 1
_lists[pos].append(value)
_keys[pos].append(key)
_maxes[pos] = key
else:
idx = bisect_right(_keys[pos], key)
_lists[pos].insert(idx, value)
_keys[pos].insert(idx, key)
self._expand(pos)
else:
_lists.append([value])
_keys.append([key])
_maxes.append(key)
self._len += 1
def _expand(self, pos):
"""Split sublists with length greater than double the load-factor.
Updates the index when the sublist length is less than double the load
level. This requires incrementing the nodes in a traversal from the
leaf node to the root. For an example traversal see
``SortedList._loc``.
"""
_lists = self._lists
_keys = self._keys
_index = self._index
if len(_keys[pos]) > (self._load << 1):
_maxes = self._maxes
_load = self._load
_lists_pos = _lists[pos]
_keys_pos = _keys[pos]
half = _lists_pos[_load:]
half_keys = _keys_pos[_load:]
del _lists_pos[_load:]
del _keys_pos[_load:]
_maxes[pos] = _keys_pos[-1]
_lists.insert(pos + 1, half)
_keys.insert(pos + 1, half_keys)
_maxes.insert(pos + 1, half_keys[-1])
del _index[:]
else:
if _index:
child = self._offset + pos
while child:
_index[child] += 1
child = (child - 1) >> 1
_index[0] += 1
def update(self, iterable):
"""Update sorted-key list by adding all values from `iterable`.
Runtime complexity: `O(k*log(n))` -- approximate.
>>> from operator import neg
>>> skl = SortedKeyList(key=neg)
>>> skl.update([3, 1, 2])
>>> skl
SortedKeyList([3, 2, 1], key=<built-in function neg>)
:param iterable: iterable of values to add
"""
_lists = self._lists
_keys = self._keys
_maxes = self._maxes
values = sorted(iterable, key=self._key)
if _maxes:
if len(values) * 4 >= self._len:
values.extend(chain.from_iterable(_lists))
values.sort(key=self._key)
self._clear()
else:
_add = self.add
for val in values:
_add(val)
return
_load = self._load
_lists.extend(values[pos:(pos + _load)]
for pos in range(0, len(values), _load))
_keys.extend(list(map(self._key, _list)) for _list in _lists)
_maxes.extend(sublist[-1] for sublist in _keys)
self._len = len(values)
del self._index[:]
_update = update
def __contains__(self, value):
"""Return true if `value` is an element of the sorted-key list.
``skl.__contains__(value)`` <==> ``value in skl``
Runtime complexity: `O(log(n))`
>>> from operator import neg
>>> skl = SortedKeyList([1, 2, 3, 4, 5], key=neg)
>>> 3 in skl
True
:param value: search for value in sorted-key list
:return: true if `value` in sorted-key list
"""
_maxes = self._maxes
if not _maxes:
return False
key = self._key(value)
pos = bisect_left(_maxes, key)
if pos == len(_maxes):
return False
_lists = self._lists
_keys = self._keys
idx = bisect_left(_keys[pos], key)
len_keys = len(_keys)
len_sublist = len(_keys[pos])
while True:
if _keys[pos][idx] != key:
return False
if _lists[pos][idx] == value:
return True
idx += 1
if idx == len_sublist:
pos += 1
if pos == len_keys:
return False
len_sublist = len(_keys[pos])
idx = 0
def discard(self, value):
"""Remove `value` from sorted-key list if it is a member.
If `value` is not a member, do nothing.
Runtime complexity: `O(log(n))` -- approximate.
>>> from operator import neg
>>> skl = SortedKeyList([5, 4, 3, 2, 1], key=neg)
>>> skl.discard(1)
>>> skl.discard(0)
>>> skl == [5, 4, 3, 2]
True
:param value: `value` to discard from sorted-key list
"""
_maxes = self._maxes
if not _maxes:
return
key = self._key(value)
pos = bisect_left(_maxes, key)
if pos == len(_maxes):
return
_lists = self._lists
_keys = self._keys
idx = bisect_left(_keys[pos], key)
len_keys = len(_keys)
len_sublist = len(_keys[pos])
while True:
if _keys[pos][idx] != key:
return
if _lists[pos][idx] == value:
self._delete(pos, idx)
return
idx += 1
if idx == len_sublist:
pos += 1
if pos == len_keys:
return
len_sublist = len(_keys[pos])
idx = 0
def remove(self, value: Any) -> None:
"""Remove `value` from sorted-key list; `value` must be a member.
If `value` is not a member, raise ValueError.
Runtime complexity: `O(log(n))` -- approximate.
>>> from operator import neg
>>> skl = SortedKeyList([1, 2, 3, 4, 5], key=neg)
>>> skl.remove(5)
>>> skl == [4, 3, 2, 1]
True
>>> skl.remove(0)
Traceback (most recent call last):
...
ValueError: 0 not in list
:param value: `value` to remove from sorted-key list
:raises ValueError: if `value` is not in sorted-key list
"""
_maxes = self._maxes
if not _maxes:
raise ValueError('{0!r} not in list'.format(value))
key = self._key(value)
pos = bisect_left(_maxes, key)
if pos == len(_maxes):
raise ValueError('{0!r} not in list'.format(value))
_lists = self._lists
_keys = self._keys
idx = bisect_left(_keys[pos], key)
len_keys = len(_keys)
len_sublist = len(_keys[pos])
while True:
if _keys[pos][idx] != key:
raise ValueError('{0!r} not in list'.format(value))
if _lists[pos][idx] == value:
self._delete(pos, idx)
return
idx += 1
if idx == len_sublist:
pos += 1
if pos == len_keys:
raise ValueError('{0!r} not in list'.format(value))
len_sublist = len(_keys[pos])
idx = 0
def _delete(self, pos, idx):
"""Delete value at the given `(pos, idx)`.
Combines lists that are less than half the load level.
Updates the index when the sublist length is more than half the load
level. This requires decrementing the nodes in a traversal from the
leaf node to the root. For an example traversal see
``SortedList._loc``.
:param int pos: lists index
:param int idx: sublist index
"""
_lists = self._lists
_keys = self._keys
_maxes = self._maxes
_index = self._index
keys_pos = _keys[pos]
lists_pos = _lists[pos]
del keys_pos[idx]
del lists_pos[idx]
self._len -= 1
len_keys_pos = len(keys_pos)
if len_keys_pos > (self._load >> 1):
_maxes[pos] = keys_pos[-1]
if _index:
child = self._offset + pos
while child > 0:
_index[child] -= 1
child = (child - 1) >> 1
_index[0] -= 1
elif len(_keys) > 1:
if not pos:
pos += 1
prev = pos - 1
_keys[prev].extend(_keys[pos])
_lists[prev].extend(_lists[pos])
_maxes[prev] = _keys[prev][-1]
del _lists[pos]
del _keys[pos]
del _maxes[pos]
del _index[:]
self._expand(prev)
elif len_keys_pos:
_maxes[pos] = keys_pos[-1]
else:
del _lists[pos]
del _keys[pos]
del _maxes[pos]
del _index[:]
def irange(self, minimum=None, maximum=None, inclusive=(True, True),
reverse=False):
"""Create an iterator of values between `minimum` and `maximum`.
Both `minimum` and `maximum` default to `None` which is automatically
inclusive of the beginning and end of the sorted-key list.
The argument `inclusive` is a pair of booleans that indicates whether
the minimum and maximum ought to be included in the range,
respectively. The default is ``(True, True)`` such that the range is
inclusive of both minimum and maximum.
When `reverse` is `True` the values are yielded from the iterator in
reverse order; `reverse` defaults to `False`.
>>> from operator import neg
>>> skl = SortedKeyList([11, 12, 13, 14, 15], key=neg)
>>> it = skl.irange(14.5, 11.5)
>>> list(it)
[14, 13, 12]
:param minimum: minimum value to start iterating
:param maximum: maximum value to stop iterating
:param inclusive: pair of booleans
:param bool reverse: yield values in reverse order
:return: iterator
"""
min_key = self._key(minimum) if minimum is not None else None
max_key = self._key(maximum) if maximum is not None else None
return self._irange_key(
min_key=min_key, max_key=max_key,
inclusive=inclusive, reverse=reverse,
)
def irange_key(self, min_key=None, max_key=None, inclusive=(True, True),
reverse=False):
"""Create an iterator of values between `min_key` and `max_key`.
Both `min_key` and `max_key` default to `None` which is automatically
inclusive of the beginning and end of the sorted-key list.
The argument `inclusive` is a pair of booleans that indicates whether
the minimum and maximum ought to be included in the range,
respectively. The default is ``(True, True)`` such that the range is
inclusive of both minimum and maximum.
When `reverse` is `True` the values are yielded from the iterator in
reverse order; `reverse` defaults to `False`.
>>> from operator import neg
>>> skl = SortedKeyList([11, 12, 13, 14, 15], key=neg)
>>> it = skl.irange_key(-14, -12)
>>> list(it)
[14, 13, 12]
:param min_key: minimum key to start iterating
:param max_key: maximum key to stop iterating
:param inclusive: pair of booleans
:param bool reverse: yield values in reverse order
:return: iterator
"""
_maxes = self._maxes
if not _maxes:
return iter(())
_keys = self._keys
# Calculate the minimum (pos, idx) pair. By default this location
# will be inclusive in our calculation.
if min_key is None:
min_pos = 0
min_idx = 0
else:
if inclusive[0]:
min_pos = bisect_left(_maxes, min_key)
if min_pos == len(_maxes):
return iter(())
min_idx = bisect_left(_keys[min_pos], min_key)
else:
min_pos = bisect_right(_maxes, min_key)
if min_pos == len(_maxes):
return iter(())
min_idx = bisect_right(_keys[min_pos], min_key)
# Calculate the maximum (pos, idx) pair. By default this location
# will be exclusive in our calculation.
if max_key is None:
max_pos = len(_maxes) - 1
max_idx = len(_keys[max_pos])
else:
if inclusive[1]:
max_pos = bisect_right(_maxes, max_key)
if max_pos == len(_maxes):
max_pos -= 1
max_idx = len(_keys[max_pos])
else:
max_idx = bisect_right(_keys[max_pos], max_key)
else:
max_pos = bisect_left(_maxes, max_key)
if max_pos == len(_maxes):
max_pos -= 1
max_idx = len(_keys[max_pos])
else:
max_idx = bisect_left(_keys[max_pos], max_key)
return self._islice(min_pos, min_idx, max_pos, max_idx, reverse)
_irange_key = irange_key
def bisect_left(self, value):
"""Return an index to insert `value` in the sorted-key list.
If the `value` is already present, the insertion point will be before
(to the left of) any existing values.
Similar to the `bisect` module in the standard library.
Runtime complexity: `O(log(n))` -- approximate.
>>> from operator import neg
>>> skl = SortedKeyList([5, 4, 3, 2, 1], key=neg)
>>> skl.bisect_left(1)
4
:param value: insertion index of value in sorted-key list
:return: index
"""
return self._bisect_key_left(self._key(value))
def bisect_right(self, value):
"""Return an index to insert `value` in the sorted-key list.
Similar to `bisect_left`, but if `value` is already present, the
        insertion point will be after (to the right of) any existing values.
Similar to the `bisect` module in the standard library.
Runtime complexity: `O(log(n))` -- approximate.
>>> from operator import neg
>>> skl = SortedList([5, 4, 3, 2, 1], key=neg)
>>> skl.bisect_right(1)
5
:param value: insertion index of value in sorted-key list
:return: index
"""
return self._bisect_key_right(self._key(value))
bisect = bisect_right
def bisect_key_left(self, key):
"""Return an index to insert `key` in the sorted-key list.
If the `key` is already present, the insertion point will be before (to
the left of) any existing keys.
Similar to the `bisect` module in the standard library.
Runtime complexity: `O(log(n))` -- approximate.
>>> from operator import neg
>>> skl = SortedKeyList([5, 4, 3, 2, 1], key=neg)
>>> skl.bisect_key_left(-1)
4
:param key: insertion index of key in sorted-key list
:return: index
"""
_maxes = self._maxes
if not _maxes:
return 0
pos = bisect_left(_maxes, key)
if pos == len(_maxes):
return self._len
idx = bisect_left(self._keys[pos], key)
return self._loc(pos, idx)
_bisect_key_left = bisect_key_left
def bisect_key_right(self, key):
"""Return an index to insert `key` in the sorted-key list.
Similar to `bisect_key_left`, but if `key` is already present, the
        insertion point will be after (to the right of) any existing keys.
Similar to the `bisect` module in the standard library.
Runtime complexity: `O(log(n))` -- approximate.
>>> from operator import neg
>>> skl = SortedList([5, 4, 3, 2, 1], key=neg)
>>> skl.bisect_key_right(-1)
5
:param key: insertion index of key in sorted-key list
:return: index
"""
_maxes = self._maxes
if not _maxes:
return 0
pos = bisect_right(_maxes, key)
if pos == len(_maxes):
return self._len
idx = bisect_right(self._keys[pos], key)
return self._loc(pos, idx)
bisect_key = bisect_key_right
_bisect_key_right = bisect_key_right
def count(self, value):
"""Return number of occurrences of `value` in the sorted-key list.
Runtime complexity: `O(log(n))` -- approximate.
>>> from operator import neg
>>> skl = SortedKeyList([4, 4, 4, 4, 3, 3, 3, 2, 2, 1], key=neg)
>>> skl.count(2)
2
:param value: value to count in sorted-key list
:return: count
"""
_maxes = self._maxes
if not _maxes:
return 0
key = self._key(value)
pos = bisect_left(_maxes, key)
if pos == len(_maxes):
return 0
_lists = self._lists
_keys = self._keys
idx = bisect_left(_keys[pos], key)
total = 0
len_keys = len(_keys)
len_sublist = len(_keys[pos])
while True:
if _keys[pos][idx] != key:
return total
if _lists[pos][idx] == value:
total += 1
idx += 1
if idx == len_sublist:
pos += 1
if pos == len_keys:
return total
len_sublist = len(_keys[pos])
idx = 0
def copy(self):
"""Return a shallow copy of the sorted-key list.
Runtime complexity: `O(n)`
:return: new sorted-key list
"""
return self.__class__(self, key=self._key)
__copy__ = copy
def index(self, value, start=None, stop=None):
"""Return first index of value in sorted-key list.
Raise ValueError if `value` is not present.
Index must be between `start` and `stop` for the `value` to be
considered present. The default value, None, for `start` and `stop`
        indicates the beginning and end of the sorted-key list.
Negative indices are supported.
Runtime complexity: `O(log(n))` -- approximate.
>>> from operator import neg
>>> skl = SortedKeyList([5, 4, 3, 2, 1], key=neg)
>>> skl.index(2)
3
>>> skl.index(0)
Traceback (most recent call last):
...
ValueError: 0 is not in list
:param value: value in sorted-key list
:param int start: start index (default None, start of sorted-key list)
:param int stop: stop index (default None, end of sorted-key list)
:return: index of value
:raises ValueError: if value is not present
"""
_len = self._len
if not _len:
raise ValueError('{0!r} is not in list'.format(value))
if start is None:
start = 0
if start < 0:
start += _len
if start < 0:
start = 0
if stop is None:
stop = _len
if stop < 0:
stop += _len
if stop > _len:
stop = _len
if stop <= start:
raise ValueError('{0!r} is not in list'.format(value))
_maxes = self._maxes
key = self._key(value)
pos = bisect_left(_maxes, key)
if pos == len(_maxes):
raise ValueError('{0!r} is not in list'.format(value))
stop -= 1
_lists = self._lists
_keys = self._keys
idx = bisect_left(_keys[pos], key)
len_keys = len(_keys)
len_sublist = len(_keys[pos])
while True:
if _keys[pos][idx] != key:
raise ValueError('{0!r} is not in list'.format(value))
if _lists[pos][idx] == value:
loc = self._loc(pos, idx)
if start <= loc <= stop:
return loc
elif loc > stop:
break
idx += 1
if idx == len_sublist:
pos += 1
if pos == len_keys:
raise ValueError('{0!r} is not in list'.format(value))
len_sublist = len(_keys[pos])
idx = 0
raise ValueError('{0!r} is not in list'.format(value))
def __add__(self, other):
"""Return new sorted-key list containing all values in both sequences.
``skl.__add__(other)`` <==> ``skl + other``
Values in `other` do not need to be in sorted-key order.
Runtime complexity: `O(n*log(n))`
>>> from operator import neg
>>> skl1 = SortedKeyList([5, 4, 3], key=neg)
>>> skl2 = SortedKeyList([2, 1, 0], key=neg)
>>> skl1 + skl2
SortedKeyList([5, 4, 3, 2, 1, 0], key=<built-in function neg>)
:param other: other iterable
:return: new sorted-key list
"""
values = reduce(iadd, self._lists, [])
values.extend(other)
        return self.__class__(values, key=self._key)
# orm/session.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides the Session class and related utilities."""
import weakref
from itertools import chain
from sqlalchemy import util, sql, engine, log, exc as sa_exc
from sqlalchemy.sql import util as sql_util, expression
from sqlalchemy.orm import (
SessionExtension, attributes, exc, query, unitofwork, util as mapperutil, state
)
from sqlalchemy.orm.util import object_mapper as _object_mapper
from sqlalchemy.orm.util import class_mapper as _class_mapper
from sqlalchemy.orm.util import (
_class_to_mapper, _state_mapper,
)
from sqlalchemy.orm.mapper import Mapper, _none_set
from sqlalchemy.orm.unitofwork import UOWTransaction
from sqlalchemy.orm import identity
from sqlalchemy import event
from sqlalchemy.orm.events import SessionEvents
import sys
__all__ = ['Session', 'SessionTransaction', 'SessionExtension']
def sessionmaker(bind=None, class_=None, autoflush=True, autocommit=False,
expire_on_commit=True, **kwargs):
"""Generate a custom-configured :class:`.Session` class.
The returned object is a subclass of :class:`.Session`, which, when instantiated
with no arguments, uses the keyword arguments configured here as its
constructor arguments.
It is intended that the :func:`.sessionmaker()` function be called within the
global scope of an application, and the returned class be made available
to the rest of the application as the single class used to instantiate
sessions.
e.g.::
# global scope
Session = sessionmaker(autoflush=False)
# later, in a local scope, create and use a session:
sess = Session()
Any keyword arguments sent to the constructor itself will override the
"configured" keywords::
Session = sessionmaker()
# bind an individual session to a connection
sess = Session(bind=connection)
The class also includes a special classmethod ``configure()``, which
allows additional configurational options to take place after the custom
``Session`` class has been generated. This is useful particularly for
defining the specific ``Engine`` (or engines) to which new instances of
``Session`` should be bound::
Session = sessionmaker()
Session.configure(bind=create_engine('sqlite:///foo.db'))
sess = Session()
For options, see the constructor options for :class:`.Session`.
"""
kwargs['bind'] = bind
kwargs['autoflush'] = autoflush
kwargs['autocommit'] = autocommit
kwargs['expire_on_commit'] = expire_on_commit
if class_ is None:
class_ = Session
class Sess(object):
def __init__(self, **local_kwargs):
for k in kwargs:
local_kwargs.setdefault(k, kwargs[k])
super(Sess, self).__init__(**local_kwargs)
@classmethod
def configure(self, **new_kwargs):
"""(Re)configure the arguments for this sessionmaker.
e.g.::
Session = sessionmaker()
Session.configure(bind=create_engine('sqlite://'))
"""
kwargs.update(new_kwargs)
return type("SessionMaker", (Sess, class_), {})
class SessionTransaction(object):
"""A :class:`.Session`-level transaction.
:class:`.SessionTransaction` is a mostly behind-the-scenes object
not normally referenced directly by application code. It coordinates
among multiple :class:`.Connection` objects, maintaining a database
transaction for each one individually, committing or rolling them
back all at once. It also provides optional two-phase commit behavior
which can augment this coordination operation.
The :attr:`.Session.transaction` attribute of :class:`.Session` refers to the
current :class:`.SessionTransaction` object in use, if any.
A :class:`.SessionTransaction` is associated with a :class:`.Session`
in its default mode of ``autocommit=False`` immediately, associated
with no database connections. As the :class:`.Session` is called upon
to emit SQL on behalf of various :class:`.Engine` or :class:`.Connection`
objects, a corresponding :class:`.Connection` and associated :class:`.Transaction`
is added to a collection within the :class:`.SessionTransaction` object,
becoming one of the connection/transaction pairs maintained by the
:class:`.SessionTransaction`.
The lifespan of the :class:`.SessionTransaction` ends when the
:meth:`.Session.commit`, :meth:`.Session.rollback` or :meth:`.Session.close`
methods are called. At this point, the :class:`.SessionTransaction` removes
its association with its parent :class:`.Session`. A :class:`.Session`
that is in ``autocommit=False`` mode will create a new
:class:`.SessionTransaction` to replace it immediately, whereas a
:class:`.Session` that's in ``autocommit=True``
mode will remain without a :class:`.SessionTransaction` until the
:meth:`.Session.begin` method is called.
Another detail of :class:`.SessionTransaction` behavior is that it is
capable of "nesting". This means that the :meth:`.begin` method can
be called while an existing :class:`.SessionTransaction` is already present,
producing a new :class:`.SessionTransaction` that temporarily replaces
the parent :class:`.SessionTransaction`. When a :class:`.SessionTransaction`
is produced as nested, it assigns itself to the :attr:`.Session.transaction`
attribute. When it is ended via :meth:`.Session.commit` or :meth:`.Session.rollback`,
it restores its parent :class:`.SessionTransaction` back onto the
:attr:`.Session.transaction` attribute. The
behavior is effectively a stack, where :attr:`.Session.transaction` refers
to the current head of the stack.
The purpose of this stack is to allow nesting of :meth:`.rollback` or
:meth:`.commit` calls in context with various flavors of :meth:`.begin`.
This nesting behavior applies to when :meth:`.Session.begin_nested`
is used to emit a SAVEPOINT transaction, and is also used to produce
a so-called "subtransaction" which allows a block of code to use a
begin/rollback/commit sequence regardless of whether or not its enclosing
code block has begun a transaction. The :meth:`.flush` method, whether called
explicitly or via autoflush, is the primary consumer of the "subtransaction"
    feature, in that it wishes to guarantee that it works within a transaction block
regardless of whether or not the :class:`.Session` is in transactional mode
when the method is called.
See also:
:meth:`.Session.rollback`
:meth:`.Session.commit`
:meth:`.Session.begin`
:meth:`.Session.begin_nested`
:attr:`.Session.is_active`
:meth:`.SessionEvents.after_commit`
:meth:`.SessionEvents.after_rollback`
:meth:`.SessionEvents.after_soft_rollback`
"""
_rollback_exception = None
def __init__(self, session, parent=None, nested=False):
self.session = session
self._connections = {}
self._parent = parent
self.nested = nested
self._active = True
self._prepared = False
if not parent and nested:
raise sa_exc.InvalidRequestError(
"Can't start a SAVEPOINT transaction when no existing "
"transaction is in progress")
if self.session._enable_transaction_accounting:
self._take_snapshot()
@property
def is_active(self):
return self.session is not None and self._active
def _assert_is_active(self):
self._assert_is_open()
if not self._active:
if self._rollback_exception:
raise sa_exc.InvalidRequestError(
"This Session's transaction has been rolled back "
"due to a previous exception during flush."
" To begin a new transaction with this Session, "
"first issue Session.rollback()."
" Original exception was: %s"
% self._rollback_exception
)
else:
raise sa_exc.InvalidRequestError(
"This Session's transaction has been rolled back "
"by a nested rollback() call. To begin a new "
"transaction, issue Session.rollback() first."
)
def _assert_is_open(self, error_msg="The transaction is closed"):
if self.session is None:
raise sa_exc.ResourceClosedError(error_msg)
@property
def _is_transaction_boundary(self):
return self.nested or not self._parent
def connection(self, bindkey, **kwargs):
self._assert_is_active()
engine = self.session.get_bind(bindkey, **kwargs)
return self._connection_for_bind(engine)
def _begin(self, nested=False):
self._assert_is_active()
return SessionTransaction(
self.session, self, nested=nested)
def _iterate_parents(self, upto=None):
if self._parent is upto:
return (self,)
else:
if self._parent is None:
raise sa_exc.InvalidRequestError(
"Transaction %s is not on the active transaction list" % (
upto))
return (self,) + self._parent._iterate_parents(upto)
def _take_snapshot(self):
if not self._is_transaction_boundary:
self._new = self._parent._new
self._deleted = self._parent._deleted
self._key_switches = self._parent._key_switches
return
if not self.session._flushing:
self.session.flush()
self._new = weakref.WeakKeyDictionary()
self._deleted = weakref.WeakKeyDictionary()
self._key_switches = weakref.WeakKeyDictionary()
def _restore_snapshot(self):
assert self._is_transaction_boundary
for s in set(self._new).union(self.session._new):
self.session._expunge_state(s)
if s.key:
del s.key
for s, (oldkey, newkey) in self._key_switches.items():
self.session.identity_map.discard(s)
s.key = oldkey
self.session.identity_map.replace(s)
for s in set(self._deleted).union(self.session._deleted):
if s.deleted:
#assert s in self._deleted
del s.deleted
self.session._update_impl(s, discard_existing=True)
assert not self.session._deleted
for s in self.session.identity_map.all_states():
s.expire(s.dict, self.session.identity_map._modified)
def _remove_snapshot(self):
assert self._is_transaction_boundary
if not self.nested and self.session.expire_on_commit:
for s in self.session.identity_map.all_states():
s.expire(s.dict, self.session.identity_map._modified)
def _connection_for_bind(self, bind):
self._assert_is_active()
if bind in self._connections:
return self._connections[bind][0]
if self._parent:
conn = self._parent._connection_for_bind(bind)
if not self.nested:
return conn
else:
if isinstance(bind, engine.Connection):
conn = bind
if conn.engine in self._connections:
raise sa_exc.InvalidRequestError(
"Session already has a Connection associated for the "
"given Connection's Engine")
else:
conn = bind.contextual_connect()
if self.session.twophase and self._parent is None:
transaction = conn.begin_twophase()
elif self.nested:
transaction = conn.begin_nested()
else:
transaction = conn.begin()
self._connections[conn] = self._connections[conn.engine] = \
(conn, transaction, conn is not bind)
self.session.dispatch.after_begin(self.session, self, conn)
return conn
def prepare(self):
if self._parent is not None or not self.session.twophase:
raise sa_exc.InvalidRequestError(
"Only root two phase transactions of can be prepared")
self._prepare_impl()
def _prepare_impl(self):
self._assert_is_active()
if self._parent is None or self.nested:
self.session.dispatch.before_commit(self.session)
stx = self.session.transaction
if stx is not self:
for subtransaction in stx._iterate_parents(upto=self):
subtransaction.commit()
if not self.session._flushing:
for _flush_guard in xrange(100):
if self.session._is_clean():
break
self.session.flush()
else:
raise exc.FlushError(
"Over 100 subsequent flushes have occurred within "
"session.commit() - is an after_flush() hook "
"creating new objects?")
if self._parent is None and self.session.twophase:
try:
for t in set(self._connections.values()):
t[1].prepare()
except:
self.rollback()
raise
self._deactivate()
self._prepared = True
def commit(self):
self._assert_is_open()
if not self._prepared:
self._prepare_impl()
if self._parent is None or self.nested:
for t in set(self._connections.values()):
t[1].commit()
self.session.dispatch.after_commit(self.session)
if self.session._enable_transaction_accounting:
self._remove_snapshot()
self.close()
return self._parent
def rollback(self, _capture_exception=False):
self._assert_is_open()
stx = self.session.transaction
if stx is not self:
for subtransaction in stx._iterate_parents(upto=self):
subtransaction.close()
if self.is_active or self._prepared:
for transaction in self._iterate_parents():
if transaction._parent is None or transaction.nested:
transaction._rollback_impl()
transaction._deactivate()
break
else:
transaction._deactivate()
sess = self.session
if self.session._enable_transaction_accounting and \
not sess._is_clean():
# if items were added, deleted, or mutated
# here, we need to re-restore the snapshot
util.warn(
"Session's state has been changed on "
"a non-active transaction - this state "
"will be discarded.")
self._restore_snapshot()
self.close()
if self._parent and _capture_exception:
self._parent._rollback_exception = sys.exc_info()[1]
sess.dispatch.after_soft_rollback(sess, self)
return self._parent
def _rollback_impl(self):
for t in set(self._connections.values()):
t[1].rollback()
if self.session._enable_transaction_accounting:
self._restore_snapshot()
self.session.dispatch.after_rollback(self.session)
def _deactivate(self):
self._active = False
def close(self):
self.session.transaction = self._parent
if self._parent is None:
for connection, transaction, autoclose in \
set(self._connections.values()):
if autoclose:
connection.close()
else:
transaction.close()
if not self.session.autocommit:
self.session.begin()
self._deactivate()
self.session = None
self._connections = None
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self._assert_is_open("Cannot end transaction context. The transaction "
"was closed from within the context")
if self.session.transaction is None:
return
if type is None:
try:
self.commit()
except:
self.rollback()
raise
else:
self.rollback()
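# Editor's note: because SessionTransaction implements __enter__/__exit__ and
# Session.begin() returns the transaction, the transaction can be used as a
# context manager.  A minimal sketch (assuming an autocommit=True session, so
# begin() is explicit; names are illustrative):
#
#   with session.begin():
#       session.add(some_object)
#   # commits on success; rolls back and re-raises if the block raises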
class Session(object):
"""Manages persistence operations for ORM-mapped objects.
The Session's usage paradigm is described at :ref:`session_toplevel`.
"""
public_methods = (
'__contains__', '__iter__', 'add', 'add_all', 'begin', 'begin_nested',
'close', 'commit', 'connection', 'delete', 'execute', 'expire',
'expire_all', 'expunge', 'expunge_all', 'flush', 'get_bind',
'is_modified',
'merge', 'query', 'refresh', 'rollback',
'scalar')
def __init__(self, bind=None, autoflush=True, expire_on_commit=True,
_enable_transaction_accounting=True,
autocommit=False, twophase=False,
weak_identity_map=True, binds=None, extension=None,
query_cls=query.Query):
"""Construct a new Session.
See also the :func:`.sessionmaker` function which is used to
generate a :class:`.Session`-producing callable with a given
set of arguments.
:param autocommit: Defaults to ``False``. When ``True``, the ``Session``
does not keep a persistent transaction running, and will acquire
connections from the engine on an as-needed basis, returning them
immediately after their use. Flushes will begin and commit (or possibly
rollback) their own transaction if no transaction is present. When using
this mode, the `session.begin()` method may be used to begin a
transaction explicitly.
Leaving it on its default value of ``False`` means that the ``Session``
will acquire a connection and begin a transaction the first time it is
used, which it will maintain persistently until ``rollback()``,
``commit()``, or ``close()`` is called. When the transaction is released
by any of these methods, the ``Session`` is ready for the next usage,
which will again acquire and maintain a new connection/transaction.
:param autoflush: When ``True``, all query operations will issue a
``flush()`` call to this ``Session`` before proceeding. This is a
convenience feature so that ``flush()`` need not be called repeatedly
in order for database queries to retrieve results. It's typical that
``autoflush`` is used in conjunction with ``autocommit=False``. In this
scenario, explicit calls to ``flush()`` are rarely needed; you usually
only need to call ``commit()`` (which flushes) to finalize changes.
:param bind: An optional ``Engine`` or ``Connection`` to which this
``Session`` should be bound. When specified, all SQL operations
performed by this session will execute via this connectable.
:param binds: An optional dictionary which contains more granular "bind"
information than the ``bind`` parameter provides. This dictionary can
map individual ``Table`` instances as well as ``Mapper`` instances to
individual ``Engine`` or ``Connection`` objects. Operations which
proceed relative to a particular ``Mapper`` will consult this
dictionary for the direct ``Mapper`` instance as well as the mapper's
          ``mapped_table`` attribute in order to locate a connectable to use.
The full resolution is described in the ``get_bind()`` method of
``Session``. Usage looks like::
Session = sessionmaker(binds={
SomeMappedClass: create_engine('postgresql://engine1'),
somemapper: create_engine('postgresql://engine2'),
some_table: create_engine('postgresql://engine3'),
})
Also see the :meth:`.Session.bind_mapper` and :meth:`.Session.bind_table` methods.
:param \class_: Specify an alternate class other than
``sqlalchemy.orm.session.Session`` which should be used by the returned
class. This is the only argument that is local to the
``sessionmaker()`` function, and is not sent directly to the
constructor for ``Session``.
:param _enable_transaction_accounting: Defaults to ``True``. A
legacy-only flag which when ``False`` disables *all* 0.5-style object
accounting on transaction boundaries, including auto-expiry of
instances on rollback and commit, maintenance of the "new" and
"deleted" lists upon rollback, and autoflush of pending changes upon
begin(), all of which are interdependent.
:param expire_on_commit: Defaults to ``True``. When ``True``, all
instances will be fully expired after each ``commit()``, so that all
attribute/object access subsequent to a completed transaction will load
from the most recent database state.
:param extension: An optional
:class:`~.SessionExtension` instance, or a list
of such instances, which will receive pre- and post- commit and flush
events, as well as a post-rollback event. **Deprecated.**
Please see :class:`.SessionEvents`.
:param query_cls: Class which should be used to create new Query objects,
as returned by the ``query()`` method. Defaults to
:class:`~sqlalchemy.orm.query.Query`.
:param twophase: When ``True``, all transactions will be started as
a "two phase" transaction, i.e. using the "two phase" semantics
of the database in use along with an XID. During a ``commit()``,
after ``flush()`` has been issued for all attached databases, the
``prepare()`` method on each database's ``TwoPhaseTransaction`` will
be called. This allows each database to roll back the entire
transaction, before each transaction is committed.
:param weak_identity_map: Defaults to ``True`` - when set to
``False``, objects placed in the :class:`.Session` will be
strongly referenced until explicitly removed or the
:class:`.Session` is closed. **Deprecated** - this option
is obsolete.
"""
if weak_identity_map:
self._identity_cls = identity.WeakInstanceDict
else:
util.warn_deprecated("weak_identity_map=False is deprecated. "
"This feature is not needed.")
self._identity_cls = identity.StrongInstanceDict
self.identity_map = self._identity_cls()
self._new = {} # InstanceState->object, strong refs object
self._deleted = {} # same
self.bind = bind
self.__binds = {}
self._flushing = False
self.transaction = None
self.hash_key = _new_sessionid()
self.autoflush = autoflush
self.autocommit = autocommit
self.expire_on_commit = expire_on_commit
self._enable_transaction_accounting = _enable_transaction_accounting
self.twophase = twophase
self._query_cls = query_cls
if extension:
for ext in util.to_list(extension):
SessionExtension._adapt_listener(self, ext)
if binds is not None:
for mapperortable, bind in binds.iteritems():
if isinstance(mapperortable, (type, Mapper)):
self.bind_mapper(mapperortable, bind)
else:
self.bind_table(mapperortable, bind)
if not self.autocommit:
self.begin()
_sessions[self.hash_key] = self
dispatch = event.dispatcher(SessionEvents)
connection_callable = None
transaction = None
"""The current active or inactive :class:`.SessionTransaction`."""
def begin(self, subtransactions=False, nested=False):
"""Begin a transaction on this Session.
If this Session is already within a transaction, either a plain
transaction or nested transaction, an error is raised, unless
``subtransactions=True`` or ``nested=True`` is specified.
The ``subtransactions=True`` flag indicates that this :meth:`~.Session.begin`
can create a subtransaction if a transaction is already in progress.
For documentation on subtransactions, please see :ref:`session_subtransactions`.
The ``nested`` flag begins a SAVEPOINT transaction and is equivalent
to calling :meth:`~.Session.begin_nested`. For documentation on SAVEPOINT
transactions, please see :ref:`session_begin_nested`.
"""
if self.transaction is not None:
if subtransactions or nested:
self.transaction = self.transaction._begin(
nested=nested)
else:
raise sa_exc.InvalidRequestError(
"A transaction is already begun. Use subtransactions=True "
"to allow subtransactions.")
else:
self.transaction = SessionTransaction(
self, nested=nested)
return self.transaction # needed for __enter__/__exit__ hook
def begin_nested(self):
"""Begin a `nested` transaction on this Session.
The target database(s) must support SQL SAVEPOINTs or a
SQLAlchemy-supported vendor implementation of the idea.
For documentation on SAVEPOINT
transactions, please see :ref:`session_begin_nested`.
"""
return self.begin(nested=True)
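    # Editor's sketch of SAVEPOINT usage via begin_nested(); names are
    # illustrative only:
    #
    #   session.begin_nested()        # emits SAVEPOINT
    #   try:
    #       session.add(risky_object)
    #       session.commit()          # releases the SAVEPOINT only
    #   except:
    #       session.rollback()        # rolls back to the SAVEPOINT
    #   session.commit()              # commits the enclosing transaction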
def rollback(self):
"""Rollback the current transaction in progress.
If no transaction is in progress, this method is a pass-through.
This method rolls back the current transaction or nested transaction
regardless of subtransactions being in effect. All subtransactions up
to the first real transaction are closed. Subtransactions occur when
begin() is called multiple times.
"""
if self.transaction is None:
pass
else:
self.transaction.rollback()
def commit(self):
"""Flush pending changes and commit the current transaction.
If no transaction is in progress, this method raises an
InvalidRequestError.
By default, the :class:`.Session` also expires all database
loaded state on all ORM-managed attributes after transaction commit.
This is so that subsequent operations load the most recent
data from the database. This behavior can be disabled using
the ``expire_on_commit=False`` option to :func:`.sessionmaker` or
the :class:`.Session` constructor.
If a subtransaction is in effect (which occurs when begin() is called
multiple times), the subtransaction will be closed, and the next call
to ``commit()`` will operate on the enclosing transaction.
For a session configured with autocommit=False, a new transaction will
be begun immediately after the commit, but note that the newly begun
transaction does *not* use any connection resources until the first
SQL is actually emitted.
"""
if self.transaction is None:
if not self.autocommit:
self.begin()
else:
raise sa_exc.InvalidRequestError("No transaction is begun.")
self.transaction.commit()
def prepare(self):
"""Prepare the current transaction in progress for two phase commit.
If no transaction is in progress, this method raises an
InvalidRequestError.
Only root transactions of two phase sessions can be prepared. If the
current transaction is not such, an InvalidRequestError is raised.
"""
if self.transaction is None:
if not self.autocommit:
self.begin()
else:
raise sa_exc.InvalidRequestError("No transaction is begun.")
self.transaction.prepare()
def connection(self, mapper=None, clause=None,
bind=None,
close_with_result=False,
**kw):
"""Return a :class:`.Connection` object corresponding to this
:class:`.Session` object's transactional state.
If this :class:`.Session` is configured with ``autocommit=False``,
either the :class:`.Connection` corresponding to the current transaction
is returned, or if no transaction is in progress, a new one is begun
and the :class:`.Connection` returned (note that no transactional state
is established with the DBAPI until the first SQL statement is emitted).
Alternatively, if this :class:`.Session` is configured with ``autocommit=True``,
an ad-hoc :class:`.Connection` is returned using :meth:`.Engine.contextual_connect`
on the underlying :class:`.Engine`.
Ambiguity in multi-bind or unbound :class:`.Session` objects can be resolved through
any of the optional keyword arguments. These ultimately make use of the
:meth:`.get_bind` method for resolution.
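E.g., a minimal sketch (``SomeMappedClass`` is a placeholder mapped class)::

    connection = session.connection(mapper=SomeMappedClass)
    connection.execute("select 1")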
:param bind:
Optional :class:`.Engine` to be used as the bind. If
this engine is already involved in an ongoing transaction,
that connection will be used. This argument takes precedence
over ``mapper``, ``clause``.
:param mapper:
Optional :func:`.mapper` mapped class, used to identify
the appropriate bind. This argument takes precedence over
``clause``.
:param clause:
A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`,
:func:`~.sql.expression.text`,
etc.) which will be used to locate a bind, if a bind
cannot otherwise be identified.
:param close_with_result: Passed to :meth:`Engine.connect`, indicating
the :class:`.Connection` should be considered "single use", automatically
closing when the first result set is closed. This flag only has
an effect if this :class:`.Session` is configured with ``autocommit=True``
and does not already have a transaction in progress.
:param \**kw:
Additional keyword arguments are sent to :meth:`get_bind()`,
allowing additional arguments to be passed to custom
implementations of :meth:`get_bind`.
"""
if bind is None:
bind = self.get_bind(mapper, clause=clause, **kw)
return self._connection_for_bind(bind,
close_with_result=close_with_result)
def _connection_for_bind(self, engine, **kwargs):
if self.transaction is not None:
return self.transaction._connection_for_bind(engine)
else:
return engine.contextual_connect(**kwargs)
def execute(self, clause, params=None, mapper=None, bind=None, **kw):
"""Execute a SQL expression construct or string statement within
the current transaction.
Returns a :class:`.ResultProxy` representing
results of the statement execution, in the same manner as that of an
:class:`.Engine` or
:class:`.Connection`.
E.g.::
result = session.execute(
user_table.select().where(user_table.c.id == 5)
)
:meth:`~.Session.execute` accepts any executable clause construct, such
as :func:`~.sql.expression.select`,
:func:`~.sql.expression.insert`,
:func:`~.sql.expression.update`,
:func:`~.sql.expression.delete`, and
:func:`~.sql.expression.text`. Plain SQL strings can be passed
as well, which in the case of :meth:`.Session.execute` only
will be interpreted the same as if it were passed via a :func:`~.expression.text`
construct. That is, the following usage::
result = session.execute(
"SELECT * FROM user WHERE id=:param",
{"param":5}
)
is equivalent to::
from sqlalchemy import text
result = session.execute(
text("SELECT * FROM user WHERE id=:param"),
{"param":5}
)
The second positional argument to :meth:`.Session.execute` is an
optional parameter set. Similar to that of :meth:`.Connection.execute`, whether this
is passed as a single dictionary, or a list of dictionaries, determines
whether the DBAPI cursor's ``execute()`` or ``executemany()`` is used to execute the
statement. An INSERT construct may be invoked for a single row::
result = session.execute(users.insert(), {"id": 7, "name": "somename"})
or for multiple rows::
result = session.execute(users.insert(), [
{"id": 7, "name": "somename7"},
{"id": 8, "name": "somename8"},
{"id": 9, "name": "somename9"}
])
The statement is executed within the current transactional context of
this :class:`.Session`. The :class:`.Connection` which is used
to execute the statement can also be acquired directly by
calling the :meth:`.Session.connection` method. Both methods use
a rule-based resolution scheme in order to determine the
:class:`.Connection`, which in the average case is derived directly
from the "bind" of the :class:`.Session` itself, and in other cases
can be based on the :func:`.mapper`
and :class:`.Table` objects passed to the method; see the documentation
for :meth:`.Session.get_bind` for a full description of this scheme.
The :meth:`.Session.execute` method does *not* invoke autoflush.
The :class:`.ResultProxy` returned by the :meth:`.Session.execute`
method is returned with the "close_with_result" flag set to true;
the significance of this flag is that if this :class:`.Session` is
autocommitting and does not have a transaction-dedicated :class:`.Connection`
available, a temporary :class:`.Connection` is established for the
statement execution, which is closed (meaning, returned to the connection
pool) when the :class:`.ResultProxy` has consumed all available data.
This applies *only* when the :class:`.Session` is configured with
autocommit=True and no transaction has been started.
:param clause:
An executable statement (i.e. an :class:`.Executable` expression
such as :func:`.expression.select`) or string SQL statement
to be executed.
:param params:
Optional dictionary, or list of dictionaries, containing
bound parameter values. If a single dictionary, single-row
execution occurs; if a list of dictionaries, an
"executemany" will be invoked. The keys in each dictionary
must correspond to parameter names present in the statement.
:param mapper:
Optional :func:`.mapper` or mapped class, used to identify
the appropriate bind. This argument takes precedence over
``clause`` when locating a bind. See :meth:`.Session.get_bind`
for more details.
:param bind:
Optional :class:`.Engine` to be used as the bind. If
this engine is already involved in an ongoing transaction,
that connection will be used. This argument takes
precedence over ``mapper`` and ``clause`` when locating
a bind.
:param \**kw:
Additional keyword arguments are sent to :meth:`.Session.get_bind()`
to allow extensibility of "bind" schemes.
.. seealso::
:ref:`sqlexpression_toplevel` - Tutorial on using Core SQL
constructs.
:ref:`connections_toplevel` - Further information on direct
statement execution.
:meth:`.Connection.execute` - core level statement execution
method, which :meth:`.Session.execute` ultimately uses
in order to execute the statement.
"""
clause = expression._literal_as_text(clause)
if bind is None:
bind = self.get_bind(mapper, clause=clause, **kw)
return self._connection_for_bind(bind, close_with_result=True).execute(
clause, params or {})
def scalar(self, clause, params=None, mapper=None, bind=None, **kw):
"""Like :meth:`~.Session.execute` but return a scalar result."""
return self.execute(clause, params=params, mapper=mapper, bind=bind, **kw).scalar()
def close(self):
"""Close this Session.
This clears all items and ends any transaction in progress.
If this session were created with ``autocommit=False``, a new
transaction is immediately begun. Note that this new transaction does
not use any connection resources until they are first needed.
"""
self.expunge_all()
if self.transaction is not None:
for transaction in self.transaction._iterate_parents():
transaction.close()
@classmethod
def close_all(cls):
"""Close *all* sessions in memory."""
for sess in _sessions.values():
sess.close()
def expunge_all(self):
"""Remove all object instances from this ``Session``.
This is equivalent to calling ``expunge(obj)`` on all objects in this
``Session``.
"""
for state in self.identity_map.all_states() + list(self._new):
state.detach()
self.identity_map = self._identity_cls()
self._new = {}
self._deleted = {}
# TODO: need much more test coverage for bind_mapper() and similar !
# TODO: + crystallize + document resolution order vis-a-vis bind_mapper/bind_table
def bind_mapper(self, mapper, bind):
"""Bind operations for a mapper to a Connectable.
mapper
A mapper instance or mapped class
bind
Any Connectable: an ``Engine`` or ``Connection``.
All subsequent operations involving this mapper will use the given
`bind`.
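E.g., a minimal sketch (``SomeMappedClass`` and ``engine_two`` are placeholder names)::

    session.bind_mapper(SomeMappedClass, engine_two)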
"""
if isinstance(mapper, type):
mapper = _class_mapper(mapper)
self.__binds[mapper.base_mapper] = bind
for t in mapper._all_tables:
self.__binds[t] = bind
def bind_table(self, table, bind):
"""Bind operations on a Table to a Connectable.
table
A ``Table`` instance
bind
Any Connectable: an ``Engine`` or ``Connection``.
All subsequent operations involving this ``Table`` will use the
given `bind`.
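E.g., a minimal sketch (``some_table`` and ``engine_two`` are placeholder names)::

    session.bind_table(some_table, engine_two)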
"""
self.__binds[table] = bind
def get_bind(self, mapper=None, clause=None):
"""Return a "bind" to which this :class:`.Session` is bound.
The "bind" is usually an instance of :class:`.Engine`,
except in the case where the :class:`.Session` has been
explicitly bound directly to a :class:`.Connection`.
For a multiply-bound or unbound :class:`.Session`, the
``mapper`` or ``clause`` arguments are used to determine the
appropriate bind to return.
Note that the "mapper" argument is usually present
when :meth:`.Session.get_bind` is called via an ORM
operation such as a :meth:`.Session.query`, each
individual INSERT/UPDATE/DELETE operation within a
:meth:`.Session.flush` call, etc.
The order of resolution is:
1. if mapper given and session.binds is present,
locate a bind based on mapper.
2. if clause given and session.binds is present,
locate a bind based on :class:`.Table` objects
found in the given clause present in session.binds.
3. if session.bind is present, return that.
4. if clause given, attempt to return a bind
linked to the :class:`.MetaData` ultimately
associated with the clause.
5. if mapper given, attempt to return a bind
linked to the :class:`.MetaData` ultimately
associated with the :class:`.Table` or other
selectable to which the mapper is mapped.
6. No bind can be found, :class:`.UnboundExecutionError`
is raised.
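E.g., a minimal sketch of both resolution styles (``SomeMappedClass`` and
``user_table`` are placeholder names)::

    engine = session.get_bind(SomeMappedClass)
    engine = session.get_bind(clause=user_table.select())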
:param mapper:
Optional :func:`.mapper` mapped class or instance of
:class:`.Mapper`. The bind can be derived from a :class:`.Mapper`
first by consulting the "binds" map associated with this
:class:`.Session`, and secondly by consulting the :class:`.MetaData`
associated with the :class:`.Table` to which the :class:`.Mapper`
is mapped for a bind.
:param clause:
A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`,
:func:`~.sql.expression.text`,
etc.). If the ``mapper`` argument is not present or could not produce
a bind, the given expression construct will be searched for a bound
element, typically a :class:`.Table` associated with bound
:class:`.MetaData`.
"""
if mapper is clause is None:
if self.bind:
return self.bind
else:
raise sa_exc.UnboundExecutionError(
"This session is not bound to a single Engine or "
"Connection, and no context was provided to locate "
"a binding.")
c_mapper = mapper is not None and _class_to_mapper(mapper) or None
# manually bound?
if self.__binds:
if c_mapper:
if c_mapper.base_mapper in self.__binds:
return self.__binds[c_mapper.base_mapper]
elif c_mapper.mapped_table in self.__binds:
return self.__binds[c_mapper.mapped_table]
if clause is not None:
for t in sql_util.find_tables(clause, include_crud=True):
if t in self.__binds:
return self.__binds[t]
if self.bind:
return self.bind
if isinstance(clause, sql.expression.ClauseElement) and clause.bind:
return clause.bind
if c_mapper and c_mapper.mapped_table.bind:
return c_mapper.mapped_table.bind
context = []
if mapper is not None:
context.append('mapper %s' % c_mapper)
if clause is not None:
context.append('SQL expression')
raise sa_exc.UnboundExecutionError(
"Could not locate a bind configured on %s or this Session" % (
', '.join(context)))
def query(self, *entities, **kwargs):
"""Return a new ``Query`` object corresponding to this ``Session``."""
return self._query_cls(entities, self, **kwargs)
@property
@util.contextmanager
def no_autoflush(self):
"""Return a context manager that disables autoflush.
e.g.::
with session.no_autoflush:
some_object = SomeClass()
session.add(some_object)
# won't autoflush
some_object.related_thing = session.query(SomeRelated).first()
Operations that proceed within the ``with:`` block
will not be subject to flushes occurring upon query
access. This is useful when initializing a series
of objects which involve existing database queries,
where the uncompleted object should not yet be flushed.
.. versionadded:: 0.7.6
"""
autoflush = self.autoflush
self.autoflush = False
yield self
self.autoflush = autoflush
def _autoflush(self):
if self.autoflush and not self._flushing:
self.flush()
def _finalize_loaded(self, states):
for state, dict_ in states.items():
# coding=utf-8
import os, sys, datetime, unicodedata
import xbmc, xbmcgui, xbmcvfs, urllib
import xml.etree.ElementTree as xmltree
from xml.dom.minidom import parse
from xml.sax.saxutils import escape as escapeXML
import thread
from traceback import print_exc
from unicodeutils import try_decode
import calendar
from time import gmtime, strftime
import random
import datafunctions
DATA = datafunctions.DataFunctions()
import library
LIBRARY = library.LibraryFunctions()
if sys.version_info < (2, 7):
import simplejson
else:
import json as simplejson
ADDON = sys.modules[ "__main__" ].ADDON
ADDONID = sys.modules[ "__main__" ].ADDONID
CWD = sys.modules[ "__main__" ].CWD
DATAPATH = os.path.join( xbmc.translatePath( "special://profile/addon_data/" ).decode('utf-8'), ADDONID )
SKINPATH = xbmc.translatePath( "special://skin/shortcuts/" ).decode('utf-8')
DEFAULTPATH = xbmc.translatePath( os.path.join( CWD, 'resources', 'shortcuts').encode("utf-8") ).decode("utf-8")
LANGUAGE = ADDON.getLocalizedString
KODIVERSION = xbmc.getInfoLabel( "System.BuildVersion" ).split(".")[0]
ACTION_CANCEL_DIALOG = ( 9, 10, 92, 216, 247, 257, 275, 61467, 61448, )
ACTION_CONTEXT_MENU = ( 117, )
ISEMPTY = "IsEmpty"
if int( KODIVERSION ) >= 17:
ISEMPTY = "String.IsEmpty"
if not xbmcvfs.exists(DATAPATH):
xbmcvfs.mkdir(DATAPATH)
def log(txt):
if ADDON.getSetting( "enable_logging" ) == "true":
try:
if isinstance (txt,str):
txt = txt.decode('utf-8')
message = u'%s: %s' % (ADDONID, txt)
xbmc.log(msg=message.encode('utf-8'), level=xbmc.LOGDEBUG)
except:
pass
def is_hebrew(text):
if type(text) != unicode:
text = text.decode('utf-8')
for chr in text:
if ord(chr) >= 1488 and ord(chr) <= 1514:
return True
return False
class GUI( xbmcgui.WindowXMLDialog ):
def __init__( self, *args, **kwargs ):
self.group = kwargs[ "group" ]
try:
self.defaultGroup = kwargs[ "defaultGroup" ]
if self.defaultGroup == "":
self.defaultGroup = None
except:
self.defaultGroup = None
self.nolabels = kwargs[ "nolabels" ]
self.groupname = kwargs[ "groupname" ]
self.shortcutgroup = 1
# Empty arrays for different shortcut types
self.thumbnailBrowseDefault = None
self.thumbnailNone = None
self.backgroundBrowse = None
self.backgroundBrowseDefault = None
self.widgetPlaylists = False
self.widgetPlaylistsType = None
self.widgetRename = True
# Variables for overrides
self.onBack = {}
self.saveWithProperty = []
# Has the skin overridden GUI 308
self.alwaysReset = False
self.alwaysRestore = False
self.allListItems = []
# Additional button ID's we'll handle for setting custom properties
self.customPropertyButtons = {}
self.customToggleButtons = {}
# Context menu
self.contextControls = []
self.contextItems = []
# Onclicks
self.customOnClick = {}
self.windowProperties = {}
self.changeMade = False
log( 'Management module loaded' )
def onInit( self ):
if self.group == '':
self._close()
else:
self.window_id = xbmcgui.getCurrentWindowDialogId()
self.currentWindow = xbmcgui.Window( xbmcgui.getCurrentWindowDialogId() )
xbmcgui.Window(self.window_id).setProperty('groupname', self.group)
if self.groupname is not None:
xbmcgui.Window( self.window_id ).setProperty( 'groupDisplayName', self.groupname )
# Load widget and background names
self._load_overrides()
# Load context menu options
self._load_overrides_context()
# Load additional onclick overrides
self._load_overrides_onclick()
# Load additional button ID's we'll handle for custom properties
self._load_customPropertyButtons()
# Load current shortcuts
self.load_shortcuts()
# Set window title label
try:
if self.getControl( 500 ).getLabel() == "":
if self.group == "mainmenu":
self.getControl( 500 ).setLabel( LANGUAGE(32071) )
elif self.groupname is not None:
self.getControl( 500 ).setLabel( LANGUAGE(32080).replace( "::MENUNAME::", self.groupname ) )
else:
self.getControl( 500 ).setLabel( LANGUAGE(32072) )
except:
pass
# Set enabled condition for various controls
has111 = True
try:
self.getControl( 111 ).setEnableCondition( "%s(Container(211).ListItem.Property(LOCKED))" %( ISEMPTY ) )
except:
has111 = False
try:
self.getControl( 302 ).setEnableCondition( "%s(Container(211).ListItem.Property(LOCKED))" %( ISEMPTY ) )
except:
pass
try:
self.getControl( 307 ).setEnableCondition( "%s(Container(211).ListItem.Property(LOCKED))" %( ISEMPTY ) )
except:
pass
try:
self.getControl( 401 ).setEnableCondition( "%s(Container(211).ListItem.Property(LOCKED))" %( ISEMPTY ) )
except:
pass
# Set button labels
if self.nolabels == "false":
try:
if self.getControl( 301 ).getLabel() == "":
self.getControl( 301 ).setLabel( LANGUAGE(32000) )
except:
log( "No add shortcut button on GUI (id 301)" )
try:
if self.getControl( 302 ).getLabel() == "":
self.getControl( 302 ).setLabel( LANGUAGE(32001) )
except:
log( "No delete shortcut button on GUI (id 302)" )
try:
if self.getControl( 303 ).getLabel() == "":
self.getControl( 303 ).setLabel( LANGUAGE(32002) )
except:
log( "No move shortcut up button on GUI (id 303)" )
try:
if self.getControl( 304 ).getLabel() == "":
self.getControl( 304 ).setLabel( LANGUAGE(32003) )
except:
log( "No move shortcut down button on GUI (id 304)" )
try:
if self.getControl( 305 ).getLabel() == "":
self.getControl( 305 ).setLabel( LANGUAGE(32025) )
except:
log( "Not set label button on GUI (id 305)" )
try:
if self.getControl( 306 ).getLabel() == "":
self.getControl( 306 ).setLabel( LANGUAGE(32026) )
except:
log( "No edit thumbnail button on GUI (id 306)" )
try:
if self.getControl( 307 ).getLabel() == "":
self.getControl( 307 ).setLabel( LANGUAGE(32027) )
except:
log( "Not edit action button on GUI (id 307)" )
try:
if self.getControl( 308 ).getLabel() == "":
self.getControl( 308 ).setLabel( LANGUAGE(32028) )
except:
log( "No reset shortcuts button on GUI (id 308)" )
try:
if self.getControl( 309 ).getLabel() == "":
self.getControl( 309 ).setLabel( LANGUAGE(32044) )
log( "Warning: Deprecated widget button (id 309)" )
except:
pass
try:
if self.getControl( 310 ).getLabel() == "":
self.getControl( 310 ).setLabel( LANGUAGE(32045) )
except:
log( "No background button on GUI (id 310)" )
try:
if self.getControl( 312 ).getLabel() == "":
self.getControl( 312 ).setLabel( LANGUAGE(32044) )
except:
log( "No widget button on GUI (id 309)" )
try:
if self.getControl( 401 ).getLabel() == "":
self.getControl( 401 ).setLabel( LANGUAGE(32048) )
except:
log( "No widget button on GUI (id 401)" )
# Load library shortcuts in thread
thread.start_new_thread( LIBRARY.loadAllLibrary, () )
if has111:
try:
self._display_shortcuts()
except:
pass
# Clear window property indicating we're loading
xbmcgui.Window( 10000 ).clearProperty( "skinshortcuts-loading" )
# ======================
# === LOAD/SAVE DATA ===
# ======================
def load_shortcuts( self, includeUserShortcuts = True, addShortcutsToWindow = True ):
log( "Loading shortcuts" )
DATA._clear_labelID()
isSubLevel = False
if "." in self.group and self.group.rsplit( ".", 1)[ 1 ].isdigit() and int( self.group.rsplit( ".", 1 )[ 1 ] ) in range( 1, 6 ):
isSubLevel = True
if includeUserShortcuts:
shortcuts = DATA._get_shortcuts( self.group, defaultGroup = self.defaultGroup, isSubLevel = isSubLevel )
else:
shortcuts = DATA._get_shortcuts( self.group, defaultGroup = self.defaultGroup, defaultsOnly = True )
#listitems = []
for shortcut in shortcuts.getroot().findall( "shortcut" ):
# Parse the shortcut, and add it to the list of shortcuts
item = self._parse_shortcut( shortcut )
self.allListItems.append( item[1] )
# Add all visible shortcuts to control 211
self._display_listitems()
def _display_listitems( self, focus = None ):
# Displays listitems that are visible from self.allListItems
# Initial properties
count = 0
visible = False
DATA._clear_labelID()
listitems = []
for listitem in self.allListItems:
# Get icon overrides
self._get_icon_overrides( listitem )
# Set order index in case it has changed
listitem.setProperty( "skinshortcuts-orderindex", str( count ) )
shouldDisplay = True
# Check for a visibility condition
if listitem.getProperty( "visible-condition" ):
shouldDisplay = xbmc.getCondVisibility( listitem.getProperty( "visible-condition" ) )
if shouldDisplay == True:
visible = True
listitems.append( listitem )
# Increase our count
count += 1
# If there are no shortcuts, add a blank one
if visible == False:
listitem = xbmcgui.ListItem( LANGUAGE(32013), iconImage = "DefaultShortcut.png" )
listitem.setProperty( "Path", 'noop' )
listitem.setProperty( "icon", "DefaultShortcut.png" )
listitem.setProperty( "skinshortcuts-orderindex", str( count ) )
listitems.append( listitem )
self.allListItems.append( listitem )
self.getControl( 211 ).reset()
self.getControl( 211 ).addItems( listitems )
if focus is not None:
self.getControl( 211 ).selectItem( focus )
self._add_additional_properties()
def _parse_shortcut( self, item ):
# Parse a shortcut node
localLabel = DATA.local( item.find( "label" ).text )
localLabel2 = DATA.local( item.find( "label2" ).text )
# Get icon and thumb (and set to None if there isn't any)
icon = item.find( "icon" )
if icon is not None and icon.text:
icon = icon.text
else:
icon = "DefaultShortcut.png"
thumb = item.find( "thumb" )
if thumb is not None and thumb.text:
thumb = thumb.text
else:
thumb = ""
# If either localLabel[ 2 ] or localLabel2[ 2 ] starts with a $, ask Kodi to parse it for us
if localLabel[ 2 ].startswith( "$" ):
localLabel[ 2 ] = xbmc.getInfoLabel( localLabel[ 2 ] )
if localLabel2[ 2 ].startswith( "$" ):
localLabel2[ 2 ] = xbmc.getInfoLabel( localLabel2[ 2 ] )
# Create the list item
listitem = xbmcgui.ListItem( label=localLabel[2], label2 = localLabel2[2], iconImage = xbmc.getInfoLabel(icon), thumbnailImage = xbmc.getInfoLabel(thumb) )
listitem.setProperty( "localizedString", localLabel[0] )
listitem.setProperty( "icon", icon )
listitem.setProperty( "thumbnail", thumb )
# Set the action
action = item.find( "action" ).text
self._add_additionalproperty( listitem, "translatedPath", action )
if "special://skin/" in action:
translate = xbmc.translatePath( "special://skin/" ).decode( "utf-8" )
action = action.replace( "special://skin/", translate )
listitem.setProperty( "path", action )
listitem.setProperty( "displayPath", action )
# Set the disabled property
if item.find( "disabled" ) is not None:
listitem.setProperty( "skinshortcuts-disabled", "True" )
else:
listitem.setProperty( "skinshortcuts-disabled", "False" )
# If there's an overridden icon, use it
overridenIcon = item.find( "override-icon" )
if overridenIcon is not None:
listitem.setIconImage( overridenIcon.text )
listitem.setProperty( "icon", overridenIcon.text )
listitem.setProperty( "original-icon", icon )
# Set the labelID, displayID, shortcutType
listitem.setProperty( "labelID", item.find( "labelID" ).text )
listitem.setProperty( "defaultID", item.find( "defaultID" ).text )
listitem.setProperty( "shortcutType", localLabel2[0] )
# Set any visible condition
isVisible = True
visibleCondition = item.find( "visible" )
if visibleCondition is not None:
listitem.setProperty( "visible-condition", visibleCondition.text )
isVisible = xbmc.getCondVisibility( visibleCondition.text )
# Check if the shortcut is locked
locked = item.find( "lock" )
if locked is not None:
if locked.text.lower() == "true" or locked.text == xbmc.getSkinDir():
listitem.setProperty( "LOCKED", locked.text )
# Additional properties
additionalProperties = item.find( "additional-properties" )
if additionalProperties is not None:
listitem.setProperty( "additionalListItemProperties", additionalProperties.text )
else:
listitem.setProperty( "additionalListItemProperties", "[]" )
self._add_additional_properties( listitem )
return [ isVisible, listitem ]
def _add_additional_properties( self, listitem = None ):
allProps = {}
backgroundName = None
backgroundPlaylistName = None
# If the listitem is None, grab the current listitem from 211
if listitem is None:
listitem = self.getControl( 211 ).getSelectedItem()
# Process current properties
currentProperties = listitem.getProperty( "skinshortcuts-allproperties" )
if currentProperties != "":
currentProperties = eval( currentProperties )
else:
currentProperties = {}
# Process all custom properties
customProperties = listitem.getProperty( "additionalListItemProperties" )
if customProperties != "":
customProperties = eval( customProperties )
for customProperty in customProperties:
if customProperty[1].startswith("$") and not customProperty[ 1 ].startswith( "$SKIN" ):
#Translate some listItem properties if needed so they're displayed correctly in the gui
allProps[ customProperty[ 0 ] ] = xbmc.getInfoLabel( customProperty[ 1 ] )
else:
allProps[ customProperty[ 0 ] ] = DATA.local( customProperty[ 1 ] )[ 2 ]
if customProperty[ 1 ].isdigit():
allProps[ "%s-NUM" %( customProperty[ 0 ] ) ] = customProperty[ 1 ]
# if this is backgroundName or backgroundPlaylistName, keep them so we can localise them properly
if customProperty[0] == "backgroundName":
backgroundName = customProperty[1]
if customProperty[0] == "backgroundPlaylistName":
backgroundPlaylistName = customProperty[1]
# If we've kept backgroundName, localise it with the updated playlist name
if backgroundName is not None and backgroundPlaylistName is not None:
allProps[ "backgroundName" ] = DATA.local( backgroundName )[2].replace( "::PLAYLIST::", backgroundPlaylistName )
# Get fallback properties
fallbackProperties, fallbacks = DATA._getCustomPropertyFallbacks( self.group )
# Add fallback properties
for key in fallbackProperties:
if key not in allProps.keys():
# Check whether we have a fallback for the value
for propertyMatch in fallbacks[ key ]:
matches = False
if propertyMatch[ 1 ] is None:
# This has no conditions, so it matched
matches = True
elif propertyMatch[ 1 ] in allProps.keys() and allProps[ propertyMatch[ 1 ] ] == propertyMatch[ 2 ]:
matches = True
if matches:
allProps[ key ] = propertyMatch[ 0 ]
break
# Get property requirements
otherProperties, requires, templateOnly = DATA._getPropertyRequires()
# Remove any properties whose requirements haven't been met
for key in otherProperties:
if key in allProps.keys() and key in requires.keys() and requires[ key ] not in allProps.keys():
# This property's requirements aren't met
allProps.pop( key )
if "%s-NUM" %( key ) in allProps.keys():
allProps.pop( "%s-NUM" %( key ) )
# Save the new properties to the listitem
listitem.setProperty( "skinshortcuts-allproperties", repr( allProps ) )
added, removed, changed = self.DictDiffer( allProps, currentProperties )
for key in added:
listitem.setProperty( key, allProps[ key ] )
for key in removed:
if key not in allProps.keys(): continue
listitem.setProperty( key, None )
for key in changed:
listitem.setProperty( key, allProps[ key ] )
# Save the new properties to the window
added, removed, changed = self.DictDiffer( allProps, self.windowProperties )
for key in added:
self.currentWindow.setProperty( key, allProps[ key ] )
for key in removed:
self.currentWindow.clearProperty( key )
for key in changed:
self.currentWindow.setProperty( key, allProps[ key ] )
self.windowProperties = allProps
def DictDiffer( self, current_dict, past_dict ):
# Get differences between dictionaries
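# e.g. (illustrative values): DictDiffer({"a": 1, "b": 2}, {"b": 3, "c": 4})
# returns ({"a"}, {"c"}, {"b"}) - added keys, removed keys, changed keys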
self.current_dict, self.past_dict = current_dict, past_dict
set_current, set_past = set(current_dict.keys()), set(past_dict.keys())
intersect = set_current.intersection(set_past)
# Added Removed Changed
return( set_current - intersect, set_past - intersect, set(o for o in intersect if past_dict[o] != current_dict[o]) )
def _get_icon_overrides( self, listitem, setToDefault = True, labelID = None ):
# Start by getting the labelID
if not labelID:
labelID = listitem.getProperty( "localizedString" )
if labelID == None or labelID == "":
labelID = listitem.getLabel()
labelID = DATA._get_labelID( DATA.local( labelID )[3], listitem.getProperty( "path" ) )
# Retrieve icon
icon = listitem.getProperty( "icon" )
oldicon = None
iconIsVar = False
if listitem.getProperty( "untranslatedIcon" ):
iconIsVar = True
# If the icon is a VAR or an INFO, we're going to translate it and set the untranslatedIcon property
if icon.startswith( "$" ):
listitem.setProperty( "untranslatedIcon", icon )
icon = xbmc.getInfoLabel( icon )
listitem.setProperty( "icon", icon )
listitem.setIconImage( icon )
iconIsVar = True
if icon.startswith("resource://"):
iconIsVar = True
# Check for overrides
tree = DATA._get_overrides_skin()
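# An override element is matched on its labelID or image attribute, optionally scoped by group,
# e.g. (illustrative sketch inferred from the attribute reads below):
#   <icon labelID="movies" group="mainmenu">DefaultMovies.png</icon>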
for elem in tree.findall( "icon" ):
if oldicon is None:
if ("labelID" in elem.attrib and elem.attrib.get( "labelID" ) == labelID) or ("image" in elem.attrib and elem.attrib.get( "image" ) == icon):
# LabelID matched
if "group" in elem.attrib:
if elem.attrib.get( "group" ) == self.group:
# Group also matches - change icon
oldicon = icon
icon = elem.text
elif "grouping" not in elem.attrib:
# No group - change icon
oldicon = icon
icon = elem.text
# If the skin doesn't have the icon, replace it with DefaultShortcut.png
setDefault = False
if ( not xbmc.skinHasImage( icon ) and setToDefault == True ) and not iconIsVar:
if oldicon == None:
oldicon = icon
setDefault = True
icon = "DefaultShortcut.png"
# If we changed the icon, update the listitem
if oldicon is not None:
listitem.setIconImage( icon )
listitem.setProperty( "icon", icon )
listitem.setProperty( "original-icon", oldicon )
if setDefault == True and setToDefault == True:
# We set this to the default icon, so we need to check if /that/ icon is overridden
self._get_icon_overrides( listitem, False, labelID )
def _save_shortcuts( self, weEnabledSystemDebug = False, weEnabledScriptDebug = False ):
# Entry point to save shortcuts - we will call the _save_shortcuts_function and, if it
# fails, enable debug options (if not enabled) + recreate the error, then offer to upload
# debug log (if relevant add-on is installed)
# Save the shortcuts
try:
self._save_shortcuts_function()
return
except:
print_exc()
log( "Failed to save shortcuts" )
# We failed to save the shortcuts
if weEnabledSystemDebug or weEnabledScriptDebug:
# Disable any logging we enabled
if weEnabledSystemDebug:
json_query = xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "id": 0, "method":"Settings.setSettingValue", "params": {"setting":"debug.showloginfo", "value":false} } ' )
if weEnabledScriptDebug:
ADDON.setSetting( "enable_logging", "false" )
if xbmc.getCondVisibility( "System.HasAddon( script.kodi.loguploader)" ):
# Offer to upload a debug log
ret = xbmcgui.Dialog().yesno( ADDON.getAddonInfo( "name" ), LANGUAGE( 32097 ), LANGUAGE( 32093 ) )
if ret:
xbmc.executebuiltin( "RunScript(script.kodi.loguploader)" )
else:
# Inform user menu couldn't be saved
xbmcgui.Dialog().ok( ADDON.getAddonInfo( "name" ), LANGUAGE( 32097 ), LANGUAGE( 32094 ) )
# We're done
return
# Enable any debug logging needed
json_query = xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "id": 0, "method": "Settings.getSettings" }')
json_query = unicode(json_query, 'utf-8', errors='ignore')
json_response = simplejson.loads(json_query)
enabledSystemDebug = False
enabledScriptDebug = False
if json_response.has_key('result') and json_response['result'].has_key('settings') and json_response['result']['settings'] is not None:
for item in json_response['result']['settings']:
if item["id"] == "debug.showloginfo":
if item["value"] == False:
json_query = xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "id": 0, "method":"Settings.setSettingValue", "params": {"setting":"debug.showloginfo", "value":true} } ' )
enabledSystemDebug = True
if ADDON.getSetting( "enable_logging" ) != "true":
ADDON.setSetting( "enable_logging", "true" )
enabledScriptDebug = True
if enabledSystemDebug or enabledScriptDebug:
# We enabled one or more of the debug options, re-run this function
self._save_shortcuts( enabledSystemDebug, enabledScriptDebug )
else:
if xbmc.getCondVisibility( "System.HasAddon( script.kodi.loguploader )" ):
# Offer to upload a debug log
ret = xbmcgui.Dialog().yesno( ADDON.getAddonInfo( "name" ), LANGUAGE( 32097 ), LANGUAGE( 32093 ) )
if ret:
xbmc.executebuiltin( "RunScript(script.kodi.loguploader)" )
else:
# Inform user menu couldn't be saved
xbmcgui.Dialog().ok( ADDON.getAddonInfo( "name" ), LANGUAGE( 32097 ), LANGUAGE( 32094 ) )
def _save_shortcuts_function( self ):
# Save shortcuts
if self.changeMade == True:
log( "Saving changes" )
# Create a new tree
tree = xmltree.ElementTree( xmltree.Element( "shortcuts" ) )
root = tree.getroot()
properties = []
labelIDChanges = []
labelIDChangesDict = {}
DATA._clear_labelID()
for listitem in self.allListItems:
# If the item has a label or an action, or a specified property from the override is present
if try_decode( listitem.getLabel() ) != LANGUAGE(32013) or listitem.getProperty( "path" ) != "noop" or self.hasSaveWithProperty( listitem ):
# Generate labelID, and mark if it has changed
labelID = listitem.getProperty( "labelID" )
newlabelID = labelID
# defaultID
defaultID = try_decode( listitem.getProperty( "defaultID" ) )
localizedString = listitem.getProperty( "localizedString" )
if localizedString is None or localizedString == "":
localLabel = DATA.local( listitem.getLabel() )
else:
localLabel = DATA.local( localizedString )
newlabelID = DATA._get_labelID( localLabel[3], listitem.getProperty( "path" ) )
if self.group == "mainmenu":
labelIDChanges.append( [labelID, newlabelID, defaultID] )
labelIDChangesDict[ labelID ] = newlabelID
# We want to save this
shortcut = xmltree.SubElement( root, "shortcut" )
xmltree.SubElement( shortcut, "defaultID" ).text = defaultID
# Label and label2
xmltree.SubElement( shortcut, "label" ).text = localLabel[0]
xmltree.SubElement( shortcut, "label2" ).text = DATA.local( listitem.getLabel2() )[0]
# Icon and thumbnail
if listitem.getProperty( "untranslatedIcon" ):
icon = listitem.getProperty( "untranslatedIcon" )
else:
if listitem.getProperty( "original-icon" ):
icon = listitem.getProperty( "original-icon" )
else:
icon = listitem.getProperty( "icon" )
thumb = listitem.getProperty( "thumbnail" )
xmltree.SubElement( shortcut, "icon" ).text = try_decode( icon )
xmltree.SubElement( shortcut, "thumb" ).text = try_decode( thumb )
# Action
xmltree.SubElement( shortcut, "action" ).text = try_decode( listitem.getProperty( "path" ) )
# Visible
if listitem.getProperty( "visible-condition" ):
xmltree.SubElement( shortcut, "visible" ).text = listitem.getProperty( "visible-condition" )
# Disabled
if listitem.getProperty( "skinshortcuts-disabled" ) == "True":
xmltree.SubElement( shortcut, "disabled" ).text = "True"
# Locked
if listitem.getProperty( "LOCKED" ):
xmltree.SubElement( shortcut, "lock" ).text = listitem.getProperty( "LOCKED" )
# Additional properties
if listitem.getProperty( "additionalListItemProperties" ):
additionalProperties = eval( listitem.getProperty( "additionalListItemProperties" ) )
if icon != "":
additionalProperties.append( [ "icon", icon ] )
if thumb != "":
additionalProperties.append( [ "thumb", thumb ] )
properties.append( [ newlabelID, additionalProperties ] )
# Check whether this is an additional level
isSubLevel = False
if "." in self.group and self.group.rsplit( ".", 1 )[ 1 ].isdigit() and int( self.group.rsplit( ".", 1 )[ 1 ] ) in range( 1, 6 ):
isSubLevel = True
# Save the shortcuts
DATA.indent( root )
path = os.path.join( DATAPATH , DATA.slugify( self.group, True, isSubLevel = isSubLevel ) + ".DATA.xml" )
path = try_decode( path )
tree.write( path.replace( ".shortcuts", ".DATA.xml" ), encoding="UTF-8" )
# Now make any labelID changes
copyDefaultProperties = []
while not len( labelIDChanges ) == 0:
# Get the first labelID change, and check that we're not changing anything from that
labelIDFrom = labelIDChanges[0][0]
labelIDTo = labelIDChanges[0][1]
defaultIDFrom = labelIDChanges[0][2]
# If labelIDFrom is empty, this is a new item, so we want to set the From the same as the To
# (this will ensure any default .shortcuts file is copied across)
if labelIDFrom == "" or labelIDFrom is None:
labelIDFrom = labelIDTo
# Check that there isn't another item in the list whose 'From' is the same as our 'To'
# - if so, we're going to move our items elsewhere, and move 'em to the correct place later
# (This ensures we don't overwrite anything incorrectly)
if not len( labelIDChanges ) == 1:
for x in range( 1, len( labelIDChanges ) ):
if labelIDChanges[x][0] == labelIDTo:
tempLocation = str( random.randrange(0,9999999999999999) )
labelIDChanges[0][1] = tempLocation
labelIDChanges.append( [tempLocation, labelIDTo, defaultIDFrom] )
labelIDTo = tempLocation
break
# Make the change (0 - the main sub-menu, 1-5 - additional submenus )
for i in range( 0, 6 ):
if i == 0:
groupName = labelIDFrom
paths = [[os.path.join( DATAPATH, DATA.slugify( labelIDFrom, True ) + ".DATA.xml" ).encode( "utf-8" ), "Move"], [os.path.join( SKINPATH, DATA.slugify( defaultIDFrom ) + ".DATA.xml" ).encode( "utf-8" ), "Copy"], [os.path.join( DEFAULTPATH, DATA.slugify( defaultIDFrom ) + ".DATA.xml" ).encode( "utf-8" ), "Copy"], [None, "New"]]
target = os.path.join( DATAPATH, DATA.slugify( labelIDTo, True ) + ".DATA.xml" ).encode( "utf-8" )
else:
groupName = "%s.%s" %( labelIDFrom, str( i ) )
paths = [[os.path.join( DATAPATH, DATA.slugify( "%s.%s" %( labelIDFrom, str( i )), True, isSubLevel = True ) + ".DATA.xml" ).encode( "utf-8" ), "Move"], [os.path.join( SKINPATH, DATA.slugify( "%s.%s" %( defaultIDFrom, str( i ) ), isSubLevel = True ) + ".DATA.xml" ).encode( "utf-8" ), "Copy"], [os.path.join( DEFAULTPATH, DATA.slugify( "%s.%s" %( defaultIDFrom, str( i ) ), isSubLevel = True ) + ".DATA.xml" ).encode( "utf-8" ), "Copy"]]
target = os.path.join( DATAPATH, DATA.slugify( "%s.%s" %( labelIDTo, str( i ) ), True, isSubLevel = True ) + ".DATA.xml" ).encode( "utf-8" )
target = try_decode( target )
for path in paths:
path[0] = try_decode( path[0] )
path[1] = try_decode( path[1] )
if path[1] == "New":
tree = xmltree.ElementTree( xmltree.Element( "shortcuts" ) )
tree.write( target, encoding="UTF-8" )
log( "Creating empty file - %s" %( target ) )
break
elif xbmcvfs.exists( path[0] ):
# The XML file exists
if path[1] == "Move":
if path[0] != target:
# Move the original to the target path
log( "Moving " + path[0] + " > " + target )
xbmcvfs.rename( path[0], target )
else:
# We're copying the file (actually, we'll re-write the file without
# any LOCKED elements and with icons/thumbs adjusted to absolute paths)
newtree = xmltree.parse( path[0] )
for newnode in newtree.getroot().findall( "shortcut" ):
searchNode = newnode.find( "locked" )
if searchNode is not None:
newnode.remove( searchNode )
# Write it to the target
DATA.indent( newtree.getroot() )
newtree.write( target, encoding="utf-8" )
log( "Copying " + path[0] + " > " + target )
# We'll need to import its default properties, so save the groupName
copyDefaultProperties.append( groupName )
break
labelIDChanges.pop( 0 )
# Save widgets, backgrounds and custom properties
self._save_properties( properties, labelIDChangesDict, copyDefaultProperties )
# Note that we've saved stuff
xbmcgui.Window( 10000 ).setProperty( "skinshortcuts-reloadmainmenu", "True" )
def hasSaveWithProperty( self, listitem ):
for propertyName in self.saveWithProperty:
if listitem.getProperty( propertyName ) != "":
return True
return False
def _save_properties( self, properties, labelIDChanges, copyDefaults ):
# Save all additional properties (widgets, backgrounds, custom)
log( "Saving properties" )
currentProperties = []
# Get previously loaded properties
path = os.path.join( DATAPATH , xbmc.getSkinDir().decode('utf-8') + ".properties" )
if xbmcvfs.exists( path ):
# The properties file exists, load from it
listProperties = eval( xbmcvfs.File( path ).read() )
for listProperty in listProperties:
# listProperty[0] = groupname
# listProperty[1] = labelID
# listProperty[2] = property name
# listProperty[3] = property value
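# e.g. a single entry might look like (illustrative values only):
#   [ "mainmenu", "movies", "widget", "RecommendedMovies" ]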
currentProperties.append( [listProperty[0], listProperty[1], listProperty[2], listProperty[3]] )
# Copy any items not in the current group to the array we'll save, and
# make any labelID changes whilst we're at it
saveData = []
for property in currentProperties:
#[ groupname, itemLabelID, property, value ]
if not property[0] == self.group:
if property[0] in labelIDChanges.keys():
property[0] = labelIDChanges[property[0]]
elif "." in property[0] and property[ 0 ].rsplit( ".", 1 )[ 1 ].isdigit():
# Additional menu
groupName, groupValue = property[ 0 ].rsplit( ".", 1 )
if groupName in labelIDChanges.keys() and int( groupValue ) in range( 1, 6 ):
property[0] = "%s.%s" %( labelIDChanges[ groupName ], groupValue )
saveData.append( property )
# Add all the properties we've been passed
for property in properties:
# property[0] = labelID
for toSave in property[1]:
# toSave[0] = property name
# toSave[1] = property value
saveData.append( [ self.group, property[0], toSave[0], toSave[1] ] )
# Add any default properties
for group in copyDefaults:
for defaultProperty in DATA.defaultProperties:
#[ groupname, itemLabelID, property, value ]
if defaultProperty[ 0 ] == group:
saveData.append( [ group, defaultProperty[ 1 ], defaultProperty[ 2 ], defaultProperty[ 3 ] ] )
# Try to save the file
try:
f = xbmcvfs.File( os.path.join( DATAPATH , xbmc.getSkinDir().decode('utf-8') + ".properties" ), 'w' )
f.write( repr( saveData ).replace( "],", "],\n" ) )
f.close()
except:
print_exc()
log( "### ERROR could not save file %s" % DATAPATH )
# Clear saved properties in DATA, so it will pick up any new ones next time we load a file
DATA.currentProperties = None
def _load_overrides( self ):
# Load various overrides from the skin, most notably backgrounds and thumbnails
self.backgrounds = "LOADING"
self.thumbnails = "LOADING"
# Load skin overrides
tree = DATA._get_overrides_skin()
# Should we allow the user to select a playlist as a widget...
elem = tree.find('widgetPlaylists')
if elem is not None and elem.text == "True":
self.widgetPlaylists = True
if "type" in elem.attrib:
self.widgetPlaylistsType = elem.attrib.get( "type" )
# Get backgrounds and thumbnails - we do this in a separate thread as the json used to load VFS paths
# is very expensive
thread.start_new_thread( self._load_backgrounds_thumbnails, () )
# Should we allow the user to browse for background images...
elem = tree.find('backgroundBrowse')
if elem is not None and elem.text.lower() in ("true", "single", "multi"):
self.backgroundBrowse = elem.text.lower()
if "default" in elem.attrib:
self.backgroundBrowseDefault = elem.attrib.get( "default" )
# Find the default thumbnail browse directory
elem = tree.find("thumbnailBrowseDefault")
if elem is not None and len(elem.text) > 0:
self.thumbnailBrowseDefault = elem.text
# Should we allow the user to rename a widget?
elem = tree.find( "widgetRename" )
if elem is not None and elem.text.lower() == "false":
self.widgetRename = False
# Does the skin override GUI 308?
elem = tree.find( "alwaysReset" )
if elem is not None and elem.text.lower() == "true":
self.alwaysReset = True
elem = tree.find( "alwaysRestore" )
if elem is not None and elem.text.lower() == "true":
self.alwaysRestore = True
# Do we enable 'Get More...' button when browsing Skin Helper widgets
elem = tree.find( "defaultwidgetsGetMore" )
if elem is not None and elem.text.lower() == "false":
LIBRARY.skinhelperWidgetInstall = False
# Are there any controls we don't close the window on 'back' for?
for elem in tree.findall( "onback" ):
self.onBack[ int( elem.text ) ] = int( elem.attrib.get( "to" ) )
# Are there any custom properties that shortcuts should be saved if present
for elem in tree.findall( "saveWithProperty" ):
self.saveWithProperty.append( elem.text )
def _load_overrides_context( self ):
# Load context menu settings from overrides
# Check we're running Krypton or later - we don't support the context menu on earlier versions
if int( KODIVERSION ) <= 16:
return
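# The overrides are parsed from a structure along these lines (illustrative sketch
# inferred from the element/attribute reads below):
#   <contextmenu>
#       <enableon>211</enableon>
#       <item control="302" condition="...">32001</item>
#   </contextmenu>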
for overrideType in [ "skin", "script" ]:
# Load overrides
if overrideType == "skin":
tree = DATA._get_overrides_skin()
else:
tree = DATA._get_overrides_script()
# Check if context menu overrides in tree
elem = tree.find( "contextmenu" )
if elem is None:
# It isn't
continue
# Get which controls the context menu is enabled on
for control in elem.findall( "enableon" ):
self.contextControls.append( int( control.text ) )
# Get the context menu items
for item in elem.findall( "item" ):
if "control" not in item.attrib:
# There's no control specified, so it's no use to us
continue
condition = None
if "condition" in item.attrib:
condition = item.attrib.get( "condition" )
self.contextItems.append( ( int( item.attrib.get( "control" ) ), condition, item.text ) )
# If we get here, we've loaded context options, so we're done
return
def _load_overrides_onclick( self ):
# Load additional onclicks from overrides
# Get overrides
tree = DATA._get_overrides_skin()
# Get additional onclick handlers
for control in tree.findall( "onclick" ):
self.customOnClick[ int( control.get( "id" ) ) ] = control.text
def _load_backgrounds_thumbnails( self ):
# Load backgrounds (done in background thread)
backgrounds = []
thumbnails = []
# Load skin overrides
tree = DATA._get_overrides_skin()
# Get backgrounds
elems = tree.findall('background')
for elem in elems:
if "condition" in elem.attrib:
if not xbmc.getCondVisibility( elem.attrib.get( "condition" ) ):
continue
if elem.text.startswith("||BROWSE||"):
#we want to include images from a VFS path...
images = LIBRARY.getImagesFromVfsPath(elem.text.replace("||BROWSE||",""))
for image in images:
backgrounds.append( [image[0], image[1] ] )
elif "icon" in elem.attrib:
backgrounds.append( [elem.attrib.get( "icon" ), DATA.local( elem.attrib.get( 'label' ) )[2] ] )
else:
backgrounds.append( [elem.text, DATA.local( elem.attrib.get( 'label' ) )[2] ] )
self.backgrounds = backgrounds
# Get thumbnails
elems = tree.findall('thumbnail')
for elem in elems:
if "condition" in elem.attrib:
if not xbmc.getCondVisibility( elem.attrib.get( "condition" ) ):
continue
if elem.text.startswith("||BROWSE||"):
#we want to include images from a VFS path...
images = LIBRARY.getImagesFromVfsPath(elem.text.replace("||BROWSE||",""))
for image in images:
thumbnails.append( [image[0], image[1] ] )
elif elem.text == "::NONE::":
if "label" in elem.attrib:
self.thumbnailNone = elem.attrib.get( "label" )
else:
self.thumbnailNone = "231"
else:
thumbnails.append( [elem.text, DATA.local( elem.attrib.get( 'label' ) )[2] ] )
self.thumbnails = thumbnails
def _load_customPropertyButtons( self ):
# Load a list of addition button IDs we'll handle for setting additional properties
# Load skin overrides
tree = DATA._get_overrides_skin()
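# Expected elements, e.g. (illustrative sketch inferred from the attribute reads below):
#   <propertySettings buttonID="404" property="widgetStyle" />
#   <propertySettings buttonID="405" toggle="hideWidget" />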
for elem in tree.findall( "propertySettings" ):
if "buttonID" in elem.attrib and "property" in elem.attrib:
self.customPropertyButtons[ int( elem.attrib.get( "buttonID" ) ) ] = elem.attrib.get( "property" )
elif "buttonID" in elem.attrib and "toggle" in elem.attrib:
self.customToggleButtons[ int( elem.attrib.get( "buttonID" ) ) ] = elem.attrib.get( "toggle" )
# ========================
# === GUI INTERACTIONS ===
# ========================
def onClick(self, controlID):
if controlID == 102:
# Move to previous type of shortcuts
self.shortcutgroup = self.shortcutgroup - 1
if self.shortcutgroup == 0:
self.shortcutgroup = LIBRARY.flatGroupingsCount()
self._display_shortcuts()
elif controlID == 103:
# Move to next type of shortcuts
self.shortcutgroup = self.shortcutgroup + 1
if self.shortcutgroup > LIBRARY.flatGroupingsCount():
self.shortcutgroup = 1
self._display_shortcuts()
elif controlID == 111:
# User has selected an available shortcut they want in their menu
log( "Select shortcut (111)" )
listControl = self.getControl( 211 )
itemIndex = listControl.getSelectedPosition()
orderIndex = int( listControl.getListItem( itemIndex ).getProperty( "skinshortcuts-orderindex" ) )
altAction = None
if self.warnonremoval( listControl.getListItem( itemIndex ) ) == False:
return
# Copy the new shortcut
selectedItem = self.getControl( 111 ).getSelectedItem()
listitemCopy = self._duplicate_listitem( selectedItem, listControl.getListItem( itemIndex ) )
path = listitemCopy.getProperty( "path" )
if path.startswith( "||BROWSE||" ):
# If this is a plugin, call our plugin browser
returnVal = LIBRARY.explorer( ["plugin://" + path.replace( "||BROWSE||", "" )], "plugin://" + path.replace( "||BROWSE||", "" ), [self.getControl( 111 ).getSelectedItem().getLabel()], [self.getControl( 111 ).getSelectedItem().getProperty("thumbnail")], self.getControl( 111 ).getSelectedItem().getProperty("shortcutType") )
if returnVal is not None:
# Convert backslashes to double-backslashes (windows fix)
newAction = returnVal.getProperty( "Path" )
newAction = newAction.replace( "\\", "\\\\" )
returnVal.setProperty( "path", newAction )
returnVal.setProperty( "displayPath", newAction )
listitemCopy = self._duplicate_listitem( returnVal, listControl.getListItem( itemIndex ) )
else:
listitemCopy = None
elif path == "||UPNP||":
returnVal = LIBRARY.explorer( ["upnp://"], "upnp://", [self.getControl( 111 ).getSelectedItem().getLabel()], [self.getControl( 111 ).getSelectedItem().getProperty("thumbnail")], self.getControl( 111 ).getSelectedItem().getProperty("shortcutType") )
if returnVal is not None:
listitemCopy = self._duplicate_listitem( returnVal, listControl.getListItem( itemIndex ) )
else:
listitemCopy = None
elif path.startswith( "||SOURCE||" ):
returnVal = LIBRARY.explorer( [path.replace( "||SOURCE||", "" )], path.replace( "||SOURCE||", "" ), [self.getControl( 111 ).getSelectedItem().getLabel()], [self.getControl( 111 ).getSelectedItem().getProperty("thumbnail")], self.getControl( 111 ).getSelectedItem().getProperty("shortcutType") )
if returnVal is not None:
if "upnp://" in returnVal.getProperty( "Path" ):
listitemCopy = self._duplicate_listitem( returnVal, listControl.getListItem( itemIndex ) )
else:
returnVal = LIBRARY._sourcelink_choice( returnVal )
if returnVal is not None:
listitemCopy = self._duplicate_listitem( returnVal, listControl.getListItem( itemIndex ) )
else:
listitemCopy = None
else:
listitemCopy = None
elif path.startswith( "::PLAYLIST" ):
log( "Selected playlist" )
if not ">" in path or "VideoLibrary" in path:
# Give the user the choice of playing or displaying the playlist
dialog = xbmcgui.Dialog()
userchoice = dialog.yesno( LANGUAGE( 32040 ), LANGUAGE( 32060 ), "", "", LANGUAGE( 32061 ), LANGUAGE( 32062 ) )
# False: Display
# True: Play
if not userchoice:
listitemCopy.setProperty( "path", selectedItem.getProperty( "action-show" ) )
listitemCopy.setProperty( "displayPath", selectedItem.getProperty( "action-show" ) )
else:
listitemCopy.setProperty( "path", selectedItem.getProperty( "action-play" ) )
listitemCopy.setProperty( "displayPath", selectedItem.getProperty( "action-play" ) )
elif ">" in path:
# Give the user the choice of playing, displaying or party mode for the playlist
dialog = xbmcgui.Dialog()
userchoice = dialog.select( LANGUAGE( 32060 ), [ LANGUAGE( 32061 ), LANGUAGE( 32062 ), xbmc.getLocalizedString( 589 ) ] )
# 0 - Display
# 1 - Play
# 2 - Party mode
if not userchoice or userchoice == 0:
listitemCopy.setProperty( "path", selectedItem.getProperty( "action-show" ) )
listitemCopy.setProperty( "displayPath", selectedItem.getProperty( "action-show" ) )
elif userchoice == 1:
listitemCopy.setProperty( "path", selectedItem.getProperty( "action-play" ) )
listitemCopy.setProperty( "displayPath", selectedItem.getProperty( "action-play" ) )
else:
listitemCopy.setProperty( "path", selectedItem.getProperty( "action-party" ) )
listitemCopy.setProperty( "displayPath", selectedItem.getProperty( "action-party" ) )
if listitemCopy is None:
# Nothing was selected in the explorer
return
self.changeMade = True
# Replace the allListItems listitem with our new list item
self.allListItems[ orderIndex ] = listitemCopy
# Delete playlist (TO BE REMOVED!)
LIBRARY._delete_playlist( listControl.getListItem( itemIndex ).getProperty( "path" ) )
# Display list items
self._display_listitems( focus = itemIndex )
elif controlID in [301, 1301]:
# Add a new item
log( "Add item (301)" )
self.changeMade = True
listControl = self.getControl( 211 )
num = listControl.getSelectedPosition()
orderIndex = int( listControl.getListItem( num ).getProperty( "skinshortcuts-orderindex" ) ) + 1
# Set default label and action
listitem = xbmcgui.ListItem( LANGUAGE(32013) )
listitem.setProperty( "Path", 'noop' )
listitem.setProperty( "additionalListItemProperties", "[]" )
# Add fallback custom property values
self._add_additional_properties( listitem )
# Add new item to both displayed list and list kept in memory
self.allListItems.insert( orderIndex, listitem )
self._display_listitems( num + 1 )
# If Control 1301 is used we want to add a new item and immediately select a shortcut
if controlID == 1301:
xbmc.executebuiltin('SendClick(401)')
elif controlID == 302:
# Delete an item
log( "Delete item (302)" )
listControl = self.getControl( 211 )
num = listControl.getSelectedPosition()
orderIndex = int( listControl.getListItem( num ).getProperty( "skinshortcuts-orderindex" ) )
if self.warnonremoval( listControl.getListItem( num ) ) == False:
return
LIBRARY._delete_playlist( listControl.getListItem( num ).getProperty( "path" ) )
self.changeMade = True
# Remove item from memory list, and reload all list items
self.allListItems.pop( orderIndex )
self._display_listitems( num )
elif controlID == 303:
# Move item up in list
log( "Move up (303)" )
listControl = self.getControl( 211 )
itemIndex = listControl.getSelectedPosition()
orderIndex = int( listControl.getListItem( itemIndex ).getProperty( "skinshortcuts-orderindex" ) )
if itemIndex == 0:
# Top item, can't move it up
return
self.changeMade = True
while True:
# Move the item one up in the list
self.allListItems[ orderIndex - 1 ], self.allListItems[ orderIndex ] = self.allListItems[ orderIndex ], self.allListItems[ orderIndex - 1 ]
# If we've just moved to the top of the list, break
if orderIndex == 1:
break
# Check if the item we've just swapped is visible
shouldBreak = True
if self.allListItems[ orderIndex ].getProperty( "visible-condition" ):
shouldBreak = xbmc.getCondVisibility( self.allListItems[ orderIndex ].getProperty( "visible-condition" ) )
if shouldBreak:
break
orderIndex -= 1
# Display the updated order
self._display_listitems( itemIndex - 1 )
elif controlID == 304:
# Move item down in list
log( "Move down (304)" )
listControl = self.getControl( 211 )
itemIndex = listControl.getSelectedPosition()
orderIndex = int( listControl.getListItem( itemIndex ).getProperty( "skinshortcuts-orderindex" ) )
log( str( itemIndex ) + " : " + str( listControl.size() ) )
if itemIndex == listControl.size() - 1:
return
self.changeMade = True
while True:
# Move the item one down in the list
self.allListItems[ orderIndex + 1 ], self.allListItems[ orderIndex ] = self.allListItems[ orderIndex ], self.allListItems[ orderIndex + 1 ]
                # If we've just moved to the bottom of the list, break
if orderIndex == len( self.allListItems ) - 1:
break
# Check if the item we've just swapped is visible
shouldBreak = True
if self.allListItems[ orderIndex ].getProperty( "visible-condition" ):
shouldBreak = xbmc.getCondVisibility( self.allListItems[ orderIndex ].getProperty( "visible-condition" ) )
if shouldBreak:
break
orderIndex += 1
# Display the updated order
self._display_listitems( itemIndex + 1 )
elif controlID == 305:
# Change label
log( "Change label (305)" )
listControl = self.getControl( 211 )
listitem = listControl.getSelectedItem()
            # Retrieve current label and labelID
label = listitem.getLabel()
oldlabelID = listitem.getProperty( "labelID" )
# If the item is blank, set the current label to empty
if try_decode( label ) == LANGUAGE(32013):
label = ""
# Get new label from keyboard dialog
if is_hebrew(label):
label = label.decode('utf-8')[::-1]
keyboard = xbmc.Keyboard( label, xbmc.getLocalizedString(528), False )
keyboard.doModal()
if ( keyboard.isConfirmed() ):
label = keyboard.getText()
if label == "":
label = LANGUAGE(32013)
else:
return
self.changeMade = True
self._set_label( listitem, label )
elif controlID == 306:
# Change thumbnail
log( "Change thumbnail (306)" )
listControl = self.getControl( 211 )
listitem = listControl.getSelectedItem()
# Get new thumbnail from browse dialog
dialog = xbmcgui.Dialog()
custom_thumbnail = dialog.browse( 2 , xbmc.getLocalizedString(1030), 'files', '', True, False, self.thumbnailBrowseDefault)
if custom_thumbnail:
# Update the thumbnail
self.changeMade = True
listitem.setThumbnailImage( custom_thumbnail )
listitem.setProperty( "thumbnail", custom_thumbnail )
else:
return
elif controlID == 307:
# Change Action
log( "Change action (307)" )
listControl = self.getControl( 211 )
listitem = listControl.getSelectedItem()
if self.warnonremoval( listitem ) == False:
return
# Retrieve current action
action = listitem.getProperty( "path" )
if action == "noop":
action = ""
if self.currentWindow.getProperty( "custom-grouping" ):
selectedShortcut = LIBRARY.selectShortcut(custom = True, currentAction = listitem.getProperty("path"), grouping = self.currentWindow.getProperty( "custom-grouping" ))
self.currentWindow.clearProperty( "custom-grouping" )
else:
selectedShortcut = LIBRARY.selectShortcut(custom = True, currentAction = listitem.getProperty("path"))
if not selectedShortcut:
# User cancelled
return
if selectedShortcut.getProperty( "chosenPath" ):
action = try_decode( selectedShortcut.getProperty( "chosenPath" ) )
elif selectedShortcut.getProperty( "path" ):
action = try_decode(selectedShortcut.getProperty( "path" ))
if action == "":
action = "noop"
if listitem.getProperty( "path" ) == action:
return
self.changeMade = True
LIBRARY._delete_playlist( listitem.getProperty( "path" ) )
# Update the action
listitem.setProperty( "path", action )
listitem.setProperty( "displaypath", action )
listitem.setLabel2( LANGUAGE(32024) )
listitem.setProperty( "shortcutType", "32024" )
elif controlID == 308:
# Reset shortcuts
log( "Reset shortcuts (308)" )
# Ask the user if they want to restore a shortcut, or reset to skin defaults
if self.alwaysReset:
                # The skin has disabled the restore function, so set response as if the user has chosen the reset to
# defaults option
response = 1
elif self.alwaysRestore:
# The skin has disabled the reset function, so set response as if the user has chosen to restore
# a skin-default shortcut
response = 0
else:
# No skin override, so let user decide to restore or reset
if not DATA.checkIfMenusShared():
# Also offer to import from another skin
response = xbmcgui.Dialog().select( LANGUAGE(32102), [ LANGUAGE(32103), LANGUAGE(32104), "Import from compatible skin" ] )
else:
response = xbmcgui.Dialog().select( LANGUAGE(32102), [ LANGUAGE(32103), LANGUAGE(32104) ] )
if response == -1:
# User cancelled
return
elif response == 0:
# We're going to restore a particular shortcut
restorePretty = []
restoreItems = []
# Save the labelID list from DATA
originalLabelIDList = DATA.labelIDList
DATA.labelIDList = []
# Get a list of all shortcuts that were originally in the menu and restore labelIDList
DATA._clear_labelID()
shortcuts = DATA._get_shortcuts( self.group, defaultGroup = self.defaultGroup, defaultsOnly = True )
DATA.labelIDList = originalLabelIDList
for shortcut in shortcuts.getroot().findall( "shortcut" ):
# Parse the shortcut
item = self._parse_shortcut( shortcut )
                    # Check if a shortcut's labelID is already in the list
if item[1].getProperty( "labelID" ) not in DATA.labelIDList:
restorePretty.append( LIBRARY._create(["", item[ 1 ].getLabel(), item[1].getLabel2(), { "icon": item[1].getProperty( "icon" ) }] ) )
restoreItems.append( item[1] )
if len( restoreItems ) == 0:
xbmcgui.Dialog().ok( LANGUAGE(32103), LANGUAGE(32105) )
return
# Let the user select a shortcut to restore
w = library.ShowDialog( "DialogSelect.xml", CWD, listing=restorePretty, windowtitle=LANGUAGE(32103) )
w.doModal()
restoreShortcut = w.result
del w
if restoreShortcut == -1:
# User cancelled
return
# We now have our shortcut to return. Add it to self.allListItems and labelID list
self.allListItems.append( restoreItems[ restoreShortcut ] )
DATA.labelIDList.append( restoreItems[ restoreShortcut ].getProperty( "labelID" ) )
self.changeMade = True
self._display_listitems()
elif response == 1:
# We're going to reset all the shortcuts
self.changeMade = True
# Delete any auto-generated source playlists
for x in range(0, self.getControl( 211 ).size()):
LIBRARY._delete_playlist( self.getControl( 211 ).getListItem( x ).getProperty( "path" ) )
self.getControl( 211 ).reset()
self.allListItems = []
# Call the load shortcuts function, but add that we don't want
# previously saved user shortcuts
self.load_shortcuts( False )
else:
# We're going to offer to import menus from another compatible skin
skinList, sharedFiles = DATA.getSharedSkinList()
if len( skinList ) == 0:
xbmcgui.Dialog().ok( LANGUAGE(32110), LANGUAGE(32109) )
return
# Let the user select a shortcut to restore
importMenu = xbmcgui.Dialog().select( LANGUAGE(32110), skinList )
if importMenu == -1:
# User cancelled
return
# Delete any auto-generated source playlists
for x in range(0, self.getControl( 211 ).size()):
LIBRARY._delete_playlist( self.getControl( 211 ).getListItem( x ).getProperty( "path" ) )
if importMenu == 0 and not len( sharedFiles ) == 0:
# User has chosen to import the shared menu
DATA.importSkinMenu( sharedFiles )
else:
# User has chosen to import from a particular skin
DATA.importSkinMenu( DATA.getFilesForSkin( skinList[ importMenu ] ), skinList[ importMenu ] )
self.getControl( 211 ).reset()
self.allListItems = []
# Call the load shortcuts function
self.load_shortcuts( True )
elif controlID == 309:
# Choose widget
log( "Warning: Deprecated control 309 (Choose widget) selected")
listControl = self.getControl( 211 )
listitem = listControl.getSelectedItem()
# Check that widgets have been loaded
LIBRARY.loadLibrary( "widgets" )
            # If we're setting for an additional widget, get its number
widgetID = ""
if self.currentWindow.getProperty( "widgetID" ):
widgetID += "." + self.currentWindow.getProperty( "widgetID" )
self.currentWindow.clearProperty( "widgetID" )
# Get the default widget for this item
defaultWidget = self.find_default( "widget", listitem.getProperty( "labelID" ), listitem.getProperty( "defaultID" ) )
# Generate list of widgets for select dialog
widget = [""]
widgetLabel = [LANGUAGE(32053)]
widgetName = [""]
widgetType = [ None ]
for key in LIBRARY.dictionaryGroupings[ "widgets-classic" ]:
widget.append( key[0] )
widgetName.append( "" )
widgetType.append( key[2] )
if key[0] == defaultWidget:
widgetLabel.append( key[1] + " (%s)" %( LANGUAGE(32050) ) )
else:
widgetLabel.append( key[1] )
# If playlists have been enabled for widgets, add them too
if self.widgetPlaylists:
# Ensure playlists are loaded
LIBRARY.loadLibrary( "playlists" )
# Add them
for playlist in LIBRARY.widgetPlaylistsList:
widget.append( "::PLAYLIST::" + playlist[0] )
widgetLabel.append( playlist[1] )
widgetName.append( playlist[2] )
widgetType.append( self.widgetPlaylistsType )
for playlist in LIBRARY.scriptPlaylists():
widget.append( "::PLAYLIST::" + playlist[0] )
widgetLabel.append( playlist[1] )
widgetName.append( playlist[2] )
widgetType.append( self.widgetPlaylistsType )
# Show the dialog
selectedWidget = xbmcgui.Dialog().select( LANGUAGE(32044), widgetLabel )
if selectedWidget == -1:
# User cancelled
return
elif selectedWidget == 0:
# User selected no widget
self._remove_additionalproperty( listitem, "widget" + widgetID )
self._remove_additionalproperty( listitem, "widgetName" + widgetID )
self._remove_additionalproperty( listitem, "widgetType" + widgetID )
self._remove_additionalproperty( listitem, "widgetPlaylist" + widgetID )
else:
if widget[selectedWidget].startswith( "::PLAYLIST::" ):
self._add_additionalproperty( listitem, "widget" + widgetID, "Playlist" )
self._add_additionalproperty( listitem, "widgetName" + widgetID, widgetName[selectedWidget] )
                    # Remove the ::PLAYLIST:: prefix (str.strip() strips characters, not a prefix)
                    self._add_additionalproperty( listitem, "widgetPlaylist" + widgetID, widget[selectedWidget][len( "::PLAYLIST::" ):] )
if self.currentWindow.getProperty( "useWidgetNameAsLabel" ) == "true" and widgetID == "":
self._set_label( listitem, widgetName[selectedWidget] )
self.currentWindow.clearProperty( "useWidgetNameAsLabel" )
else:
self._add_additionalproperty( listitem, "widgetName" + widgetID, widgetLabel[selectedWidget].replace( " (%s)" %( LANGUAGE(32050) ), "" ) )
self._add_additionalproperty( listitem, "widget" + widgetID, widget[selectedWidget] )
self._remove_additionalproperty( listitem, "widgetPlaylist" + widgetID )
if self.currentWindow.getProperty( "useWidgetNameAsLabel" ) == "true" and widgetID == "":
self._set_label( listitem, widgetLabel[selectedWidget].replace( " (%s)" %( LANGUAGE(32050) ), "" ) )
self.currentWindow.clearProperty( "useWidgetNameAsLabel" )
if widgetType[ selectedWidget ] is not None:
self._add_additionalproperty( listitem, "widgetType" + widgetID, widgetType[ selectedWidget] )
else:
self._remove_additionalproperty( listitem, "widgetType" + widgetID )
self.changeMade = True
elif controlID == 312:
# Alternative widget select
log( "Choose widget (312)" )
listControl = self.getControl( 211 )
listitem = listControl.getSelectedItem()
# If we're setting for an additional widget, get its number
widgetID = ""
if self.currentWindow.getProperty( "widgetID" ):
widgetID = "." + self.currentWindow.getProperty( "widgetID" )
self.currentWindow.clearProperty( "widgetID" )
# Get the default widget for this item
defaultWidget = self.find_default( "widget", listitem.getProperty( "labelID" ), listitem.getProperty( "defaultID" ) )
# Ensure widgets are loaded
LIBRARY.loadLibrary( "widgets" )
# Let user choose widget
if listitem.getProperty( "widgetPath" ) == "":
selectedShortcut = LIBRARY.selectShortcut( grouping = "widget", showNone = True )
else:
selectedShortcut = LIBRARY.selectShortcut( grouping = "widget", showNone = True, custom = True, currentAction = listitem.getProperty( "widgetPath" ) )
if selectedShortcut is None:
# User cancelled
return
if selectedShortcut.getProperty( "Path" ) and selectedShortcut.getProperty( "custom" ) == "true":
# User has manually edited the widget path, so we'll update that property only
self._add_additionalproperty( listitem, "widgetPath" + widgetID, selectedShortcut.getProperty( "Path" ) )
self.changeMade = True
elif selectedShortcut.getProperty( "Path" ):
# User has chosen a widget
# Let user edit widget title, if they want & skin hasn't disabled it
widgetName = selectedShortcut.getProperty( "widgetName" )
if self.widgetRename:
if widgetName.startswith("$"):
                        widgetTempName = xbmc.getInfoLabel(widgetName)
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1, True]
BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0, False]
BOOLEANS = BOOLEANS_TRUE + BOOLEANS_FALSE
# ansible modules can be written in any language. To simplify
# development of Python modules, the functions available here can
# be used to do many common tasks
import locale
import os
import re
import pipes
import shlex
import subprocess
import sys
import types
import time
import select
import shutil
import stat
import tempfile
import traceback
import grp
import pwd
import platform
import errno
import datetime
from itertools import repeat, chain
try:
import syslog
HAS_SYSLOG=True
except ImportError:
HAS_SYSLOG=False
try:
# Python 2
from itertools import imap
except ImportError:
# Python 3
imap = map
try:
# Python 2
basestring
except NameError:
# Python 3
basestring = str
try:
# Python 2
unicode
except NameError:
# Python 3
unicode = str
try:
# Python 2.6+
bytes
except NameError:
# Python 2.4
bytes = str
try:
dict.iteritems
except AttributeError:
# Python 3
def iteritems(d):
return d.items()
else:
# Python 2
def iteritems(d):
return d.iteritems()
try:
reduce
except NameError:
# Python 3
from functools import reduce
try:
NUMBERTYPES = (int, long, float)
except NameError:
# Python 3
NUMBERTYPES = (int, float)
# Python2 & 3 way to get NoneType
NoneType = type(None)
try:
from collections import Sequence, Mapping
except ImportError:
# python2.5
Sequence = (list, tuple)
Mapping = (dict,)
try:
from collections.abc import KeysView
SEQUENCETYPE = (Sequence, KeysView)
except:
SEQUENCETYPE = Sequence
try:
import json
# Detect the python-json library which is incompatible
# Look for simplejson if that's the case
try:
if not isinstance(json.loads, types.FunctionType) or not isinstance(json.dumps, types.FunctionType):
raise ImportError
except AttributeError:
raise ImportError
except ImportError:
try:
import simplejson as json
except ImportError:
print('\n{"msg": "Error: ansible requires the stdlib json or simplejson module, neither was found!", "failed": true}')
sys.exit(1)
except SyntaxError:
print('\n{"msg": "SyntaxError: probably due to installed simplejson being for a different python version", "failed": true}')
sys.exit(1)
from ansible.module_utils.six import PY2, PY3, b, binary_type, text_type, string_types
HAVE_SELINUX=False
try:
import selinux
HAVE_SELINUX=True
except ImportError:
pass
try:
from systemd import journal
has_journal = True
except ImportError:
has_journal = False
AVAILABLE_HASH_ALGORITHMS = dict()
try:
import hashlib
# python 2.7.9+ and 2.7.0+
for attribute in ('available_algorithms', 'algorithms'):
algorithms = getattr(hashlib, attribute, None)
if algorithms:
break
if algorithms is None:
# python 2.5+
algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
for algorithm in algorithms:
AVAILABLE_HASH_ALGORITHMS[algorithm] = getattr(hashlib, algorithm)
except ImportError:
import sha
AVAILABLE_HASH_ALGORITHMS = {'sha1': sha.sha}
try:
import md5
AVAILABLE_HASH_ALGORITHMS['md5'] = md5.md5
except ImportError:
pass
try:
from ast import literal_eval
except ImportError:
# a replacement for literal_eval that works with python 2.4. from:
# https://mail.python.org/pipermail/python-list/2009-September/551880.html
# which is essentially a cut/paste from an earlier (2.6) version of python's
# ast.py
from compiler import ast, parse
def literal_eval(node_or_string):
"""
Safely evaluate an expression node or a string containing a Python
expression. The string or node provided may only consist of the following
Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
and None.
"""
_safe_names = {'None': None, 'True': True, 'False': False}
if isinstance(node_or_string, basestring):
node_or_string = parse(node_or_string, mode='eval')
if isinstance(node_or_string, ast.Expression):
node_or_string = node_or_string.node
def _convert(node):
if isinstance(node, ast.Const) and isinstance(node.value, (basestring, int, float, long, complex)):
return node.value
elif isinstance(node, ast.Tuple):
return tuple(map(_convert, node.nodes))
elif isinstance(node, ast.List):
return list(map(_convert, node.nodes))
elif isinstance(node, ast.Dict):
                return dict((_convert(k), _convert(v)) for k, v in node.items)
elif isinstance(node, ast.Name):
if node.name in _safe_names:
return _safe_names[node.name]
elif isinstance(node, ast.UnarySub):
return -_convert(node.expr)
raise ValueError('malformed string')
return _convert(node_or_string)
_literal_eval = literal_eval
# Backwards compat. There were present in basic.py before
from ansible.module_utils.pycompat24 import get_exception
# Internal global holding passed in params. This is consulted in case
# multiple AnsibleModules are created. Otherwise each AnsibleModule would
# attempt to read from stdin. Other code should not use this directly as it
# is an internal implementation detail
_ANSIBLE_ARGS = None
FILE_COMMON_ARGUMENTS=dict(
src = dict(),
mode = dict(type='raw'),
owner = dict(),
group = dict(),
seuser = dict(),
serole = dict(),
selevel = dict(),
setype = dict(),
follow = dict(type='bool', default=False),
# not taken by the file module, but other modules call file so it must ignore them.
content = dict(no_log=True),
backup = dict(),
force = dict(),
remote_src = dict(), # used by assemble
regexp = dict(), # used by assemble
delimiter = dict(), # used by assemble
directory_mode = dict(), # used by copy
)
PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')
# Can't use 07777 on Python 3, can't use 0o7777 on Python 2.4
PERM_BITS = int('07777', 8) # file mode permission bits
EXEC_PERM_BITS = int('00111', 8) # execute permission bits
DEFAULT_PERM = int('0666', 8) # default file permission bits
def get_platform():
''' what's the platform? example: Linux is a platform. '''
return platform.system()
def get_distribution():
''' return the distribution name '''
if platform.system() == 'Linux':
try:
supported_dists = platform._supported_dists + ('arch',)
distribution = platform.linux_distribution(supported_dists=supported_dists)[0].capitalize()
if not distribution and os.path.isfile('/etc/system-release'):
distribution = platform.linux_distribution(supported_dists=['system'])[0].capitalize()
if 'Amazon' in distribution:
distribution = 'Amazon'
else:
distribution = 'OtherLinux'
except:
# FIXME: MethodMissing, I assume?
distribution = platform.dist()[0].capitalize()
else:
distribution = None
return distribution
def get_distribution_version():
''' return the distribution version '''
if platform.system() == 'Linux':
try:
distribution_version = platform.linux_distribution()[1]
if not distribution_version and os.path.isfile('/etc/system-release'):
distribution_version = platform.linux_distribution(supported_dists=['system'])[1]
except:
# FIXME: MethodMissing, I assume?
distribution_version = platform.dist()[1]
else:
distribution_version = None
return distribution_version
def get_all_subclasses(cls):
'''
used by modules like Hardware or Network fact classes to retrieve all subclasses of a given class.
    __subclasses__ returns only direct subclasses. This one goes down into the class tree.
'''
# Retrieve direct subclasses
subclasses = cls.__subclasses__()
to_visit = list(subclasses)
# Then visit all subclasses
while to_visit:
for sc in to_visit:
# The current class is now visited, so remove it from list
to_visit.remove(sc)
            # Append all subclasses to the visit list and keep a reference to each class found
for ssc in sc.__subclasses__():
subclasses.append(ssc)
to_visit.append(ssc)
return subclasses
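# Illustrative sketch (not part of the original module): unlike cls.__subclasses__(),
# get_all_subclasses() also finds indirect subclasses. The classes below are
# hypothetical and exist only to show the traversal.
def _example_get_all_subclasses():
    class Base(object):
        pass
    class Child(Base):
        pass
    class GrandChild(Child):
        pass
    # __subclasses__() only sees Child; the recursive helper also finds GrandChild.
    assert GrandChild not in Base.__subclasses__()
    assert GrandChild in get_all_subclasses(Base)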
def load_platform_subclass(cls, *args, **kwargs):
'''
used by modules like User to have different implementations based on detected platform. See User
module for an example.
'''
this_platform = get_platform()
distribution = get_distribution()
subclass = None
# get the most specific superclass for this platform
if distribution is not None:
for sc in get_all_subclasses(cls):
if sc.distribution is not None and sc.distribution == distribution and sc.platform == this_platform:
subclass = sc
if subclass is None:
for sc in get_all_subclasses(cls):
if sc.platform == this_platform and sc.distribution is None:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
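# Illustrative sketch (not part of the original module): a hypothetical hierarchy
# wired up the way modules such as User use load_platform_subclass(). Instantiating
# _ExamplePkgTool() returns the most specific subclass whose platform (and, when set,
# distribution) matches the running system, falling back to the base class.
class _ExamplePkgTool(object):
    platform = 'Generic'
    distribution = None
    def __new__(cls, *args, **kwargs):
        return load_platform_subclass(_ExamplePkgTool, *args, **kwargs)
class _ExampleLinuxPkgTool(_ExamplePkgTool):
    platform = 'Linux'
    distribution = None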
def json_dict_unicode_to_bytes(d, encoding='utf-8'):
''' Recursively convert dict keys and values to byte str
    Specialized for json return data because this only handles lists, tuples,
and dict container types (the containers that the json module returns)
'''
if isinstance(d, unicode):
return d.encode(encoding)
elif isinstance(d, dict):
return dict(imap(json_dict_unicode_to_bytes, iteritems(d), repeat(encoding)))
elif isinstance(d, list):
return list(imap(json_dict_unicode_to_bytes, d, repeat(encoding)))
elif isinstance(d, tuple):
return tuple(imap(json_dict_unicode_to_bytes, d, repeat(encoding)))
else:
return d
def json_dict_bytes_to_unicode(d, encoding='utf-8'):
    ''' Recursively convert dict keys and values to unicode str
    Specialized for json return data because this only handles lists, tuples,
and dict container types (the containers that the json module returns)
'''
if isinstance(d, bytes):
return unicode(d, encoding)
elif isinstance(d, dict):
return dict(imap(json_dict_bytes_to_unicode, iteritems(d), repeat(encoding)))
elif isinstance(d, list):
return list(imap(json_dict_bytes_to_unicode, d, repeat(encoding)))
elif isinstance(d, tuple):
return tuple(imap(json_dict_bytes_to_unicode, d, repeat(encoding)))
else:
return d
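# Illustrative sketch (not part of the original module): the two helpers above walk
# dicts, lists and tuples and convert leaf strings in opposite directions, which is
# how JSON-decoded parameters are normalised on Python 2.
def _example_json_dict_conversion():
    data = {u'name': u'caf\xe9', u'tags': [u'a', u'b']}
    as_bytes = json_dict_unicode_to_bytes(data)        # keys/values become utf-8 byte strings
    round_trip = json_dict_bytes_to_unicode(as_bytes)  # ...and back to text
    assert round_trip == data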
def return_values(obj):
""" Return stringified values from datastructures. For use with removing
sensitive values pre-jsonification."""
if isinstance(obj, basestring):
if obj:
if isinstance(obj, bytes):
yield obj
else:
# Unicode objects should all convert to utf-8
# (still must deal with surrogateescape on python3)
yield obj.encode('utf-8')
return
elif isinstance(obj, SEQUENCETYPE):
for element in obj:
for subelement in return_values(element):
yield subelement
elif isinstance(obj, Mapping):
for element in obj.items():
for subelement in return_values(element[1]):
yield subelement
elif isinstance(obj, (bool, NoneType)):
# This must come before int because bools are also ints
return
elif isinstance(obj, NUMBERTYPES):
yield str(obj)
else:
raise TypeError('Unknown parameter type: %s, %s' % (type(obj), obj))
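# Illustrative sketch (not part of the original module): return_values() yields the
# leaf values of a parameter structure as utf-8 byte strings (numbers are
# stringified), which is how AnsibleModule collects no_log values for masking.
def _example_return_values():
    params = {'user': 'admin', 'auth': {'token': 's3cret', 'retries': 3}}
    leaves = set(return_values(params))
    assert b's3cret' in leaves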
def remove_values(value, no_log_strings):
""" Remove strings in no_log_strings from value. If value is a container
type, then remove a lot more"""
if isinstance(value, basestring):
if isinstance(value, unicode):
# This should work everywhere on python2. Need to check
# surrogateescape on python3
bytes_value = value.encode('utf-8')
value_is_unicode = True
else:
bytes_value = value
value_is_unicode = False
if bytes_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
bytes_value = bytes_value.replace(omit_me, '*' * 8)
if value_is_unicode:
value = unicode(bytes_value, 'utf-8', errors='replace')
else:
value = bytes_value
elif isinstance(value, SEQUENCETYPE):
return [remove_values(elem, no_log_strings) for elem in value]
elif isinstance(value, Mapping):
return dict((k, remove_values(v, no_log_strings)) for k, v in value.items())
elif isinstance(value, tuple(chain(NUMBERTYPES, (bool, NoneType)))):
stringy_value = str(value)
if stringy_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
if omit_me in stringy_value:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
elif isinstance(value, datetime.datetime):
value = value.isoformat()
else:
raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
return value
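# Illustrative sketch (not part of the original module): remove_values() recurses
# through containers and replaces any value matching a no_log string with a
# placeholder (substring occurrences inside longer strings are starred out).
def _example_remove_values():
    cleaned = remove_values({'password': 's3cret'}, [b's3cret'])
    assert cleaned['password'] == 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'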
def heuristic_log_sanitize(data, no_log_values=None):
''' Remove strings that look like passwords from log messages '''
# Currently filters:
# user:pass@foo/whatever and http://username:pass@wherever/foo
# This code has false positives and consumes parts of logs that are
# not passwds
# begin: start of a passwd containing string
# end: end of a passwd containing string
# sep: char between user and passwd
# prev_begin: where in the overall string to start a search for
# a passwd
# sep_search_end: where in the string to end a search for the sep
output = []
begin = len(data)
prev_begin = begin
sep = 1
while sep:
# Find the potential end of a passwd
try:
end = data.rindex('@', 0, begin)
except ValueError:
# No passwd in the rest of the data
output.insert(0, data[0:begin])
break
# Search for the beginning of a passwd
sep = None
sep_search_end = end
while not sep:
# URL-style username+password
try:
begin = data.rindex('://', 0, sep_search_end)
except ValueError:
# No url style in the data, check for ssh style in the
# rest of the string
begin = 0
# Search for separator
try:
sep = data.index(':', begin + 3, end)
except ValueError:
# No separator; choices:
if begin == 0:
# Searched the whole string so there's no password
# here. Return the remaining data
output.insert(0, data[0:begin])
break
# Search for a different beginning of the password field.
sep_search_end = begin
continue
if sep:
# Password was found; remove it.
output.insert(0, data[end:prev_begin])
output.insert(0, '********')
output.insert(0, data[begin:sep + 1])
prev_begin = begin
output = ''.join(output)
if no_log_values:
output = remove_values(output, no_log_values)
return output
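# Illustrative sketch (not part of the original module): passwords embedded in
# URL-style strings ("scheme://user:pass@host") are starred out before logging.
def _example_heuristic_log_sanitize():
    msg = 'fetching https://bob:s3cret@example.com/repo.git'
    assert 's3cret' not in heuristic_log_sanitize(msg)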
def is_executable(path):
'''is the given path executable?
Limitations:
* Does not account for FSACLs.
* Most times we really want to know "Can the current user execute this
file" This function does not tell us that, only if an execute bit is set.
'''
# These are all bitfields so first bitwise-or all the permissions we're
# looking for, then bitwise-and with the file's mode to determine if any
# execute bits are set.
return ((stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) & os.stat(path)[stat.ST_MODE])
def _load_params():
''' read the modules parameters and store them globally.
This function may be needed for certain very dynamic custom modules which
want to process the parameters that are being handed the module. Since
this is so closely tied to the implementation of modules we cannot
guarantee API stability for it (it may change between versions) however we
will try not to break it gratuitously. It is certainly more future-proof
to call this function and consume its outputs than to implement the logic
inside it as a copy in your own code.
'''
global _ANSIBLE_ARGS
if _ANSIBLE_ARGS is not None:
buffer = _ANSIBLE_ARGS
else:
# debug overrides to read args from file or cmdline
# Avoid tracebacks when locale is non-utf8
# We control the args and we pass them as utf8
if len(sys.argv) > 1:
if os.path.isfile(sys.argv[1]):
fd = open(sys.argv[1], 'rb')
buffer = fd.read()
fd.close()
else:
buffer = sys.argv[1]
if PY3:
buffer = buffer.encode('utf-8', errors='surrogateescape')
# default case, read from stdin
else:
if PY2:
buffer = sys.stdin.read()
else:
buffer = sys.stdin.buffer.read()
_ANSIBLE_ARGS = buffer
try:
params = json.loads(buffer.decode('utf-8'))
except ValueError:
        # This helper is used too early for fail_json to work.
print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}')
sys.exit(1)
if PY2:
params = json_dict_unicode_to_bytes(params)
try:
return params['ANSIBLE_MODULE_ARGS']
except KeyError:
# This helper does not have access to fail_json so we have to print
# json output on our own.
print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", "failed": true}')
sys.exit(1)
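# Illustrative sketch (not part of the original module): a very dynamic custom
# module can inspect the raw incoming parameters before it builds an
# argument_spec. The 'custom_' key prefix used here is hypothetical.
def _example_peek_at_params():
    params = _load_params()
    return [k for k in params if k.startswith('custom_')]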
def env_fallback(*args, **kwargs):
''' Load value from environment '''
for arg in args:
if arg in os.environ:
return os.environ[arg]
else:
raise AnsibleFallbackNotFound
class AnsibleFallbackNotFound(Exception):
pass
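# Illustrative sketch (not part of the original module): env_fallback is referenced
# from an argument_spec so that a parameter left unset falls back to an environment
# variable. The parameter and variable names below are hypothetical.
_EXAMPLE_ARGUMENT_SPEC = dict(
    api_url=dict(required=False, fallback=(env_fallback, ['EXAMPLE_API_URL'])),
)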
class AnsibleModule(object):
def __init__(self, argument_spec, bypass_checks=False, no_log=False,
check_invalid_arguments=True, mutually_exclusive=None, required_together=None,
required_one_of=None, add_file_common_args=False, supports_check_mode=False,
required_if=None):
'''
common code for quickly building an ansible module in Python
(although you can write modules in anything that can return JSON)
see library/* for examples
'''
self.argument_spec = argument_spec
self.supports_check_mode = supports_check_mode
self.check_mode = False
self.no_log = no_log
self.cleanup_files = []
self._debug = False
self._diff = False
self._verbosity = 0
# May be used to set modifications to the environment for any
# run_command invocation
self.run_command_environ_update = {}
self.aliases = {}
self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug', '_ansible_diff', '_ansible_verbosity', '_ansible_selinux_special_fs', '_ansible_module_name', '_ansible_version', '_ansible_syslog_facility']
if add_file_common_args:
for k, v in FILE_COMMON_ARGUMENTS.items():
if k not in self.argument_spec:
self.argument_spec[k] = v
self._load_params()
self._set_fallbacks()
# append to legal_inputs and then possibly check against them
try:
self.aliases = self._handle_aliases()
except Exception:
e = get_exception()
# Use exceptions here because it isn't safe to call fail_json until no_log is processed
print('\n{"failed": true, "msg": "Module alias error: %s"}' % str(e))
sys.exit(1)
# Save parameter values that should never be logged
self.no_log_values = set()
# Use the argspec to determine which args are no_log
for arg_name, arg_opts in self.argument_spec.items():
if arg_opts.get('no_log', False):
# Find the value for the no_log'd param
no_log_object = self.params.get(arg_name, None)
if no_log_object:
self.no_log_values.update(return_values(no_log_object))
# check the locale as set by the current environment, and reset to
# a known valid (LANG=C) if it's an invalid/unavailable locale
self._check_locale()
self._check_arguments(check_invalid_arguments)
# check exclusive early
if not bypass_checks:
self._check_mutually_exclusive(mutually_exclusive)
self._set_defaults(pre=True)
self._CHECK_ARGUMENT_TYPES_DISPATCHER = {
'str': self._check_type_str,
'list': self._check_type_list,
'dict': self._check_type_dict,
'bool': self._check_type_bool,
'int': self._check_type_int,
'float': self._check_type_float,
'path': self._check_type_path,
'raw': self._check_type_raw,
'jsonarg': self._check_type_jsonarg,
'json': self._check_type_jsonarg,
}
if not bypass_checks:
self._check_required_arguments()
self._check_argument_types()
self._check_argument_values()
self._check_required_together(required_together)
self._check_required_one_of(required_one_of)
self._check_required_if(required_if)
self._set_defaults(pre=False)
if not self.no_log and self._verbosity >= 3:
self._log_invocation()
# finally, make sure we're in a sane working dir
self._set_cwd()
def load_file_common_arguments(self, params):
'''
many modules deal with files, this encapsulates common
options that the file module accepts such that it is directly
available to all modules and they can share code.
'''
path = params.get('path', params.get('dest', None))
if path is None:
return {}
else:
path = os.path.expanduser(path)
# if the path is a symlink, and we're following links, get
# the target of the link instead for testing
if params.get('follow', False) and os.path.islink(path):
path = os.path.realpath(path)
mode = params.get('mode', None)
owner = params.get('owner', None)
group = params.get('group', None)
# selinux related options
seuser = params.get('seuser', None)
serole = params.get('serole', None)
setype = params.get('setype', None)
selevel = params.get('selevel', None)
secontext = [seuser, serole, setype]
if self.selinux_mls_enabled():
secontext.append(selevel)
default_secontext = self.selinux_default_context(path)
for i in range(len(default_secontext)):
if i is not None and secontext[i] == '_default':
secontext[i] = default_secontext[i]
return dict(
path=path, mode=mode, owner=owner, group=group,
seuser=seuser, serole=serole, setype=setype,
selevel=selevel, secontext=secontext,
)
# Detect whether using selinux that is MLS-aware.
# While this means you can set the level/range with
# selinux.lsetfilecon(), it may or may not mean that you
# will get the selevel as part of the context returned
# by selinux.lgetfilecon().
def selinux_mls_enabled(self):
if not HAVE_SELINUX:
return False
if selinux.is_selinux_mls_enabled() == 1:
return True
else:
return False
def selinux_enabled(self):
if not HAVE_SELINUX:
seenabled = self.get_bin_path('selinuxenabled')
if seenabled is not None:
(rc,out,err) = self.run_command(seenabled)
if rc == 0:
self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
return False
if selinux.is_selinux_enabled() == 1:
return True
else:
return False
# Determine whether we need a placeholder for selevel/mls
def selinux_initial_context(self):
context = [None, None, None]
if self.selinux_mls_enabled():
context.append(None)
return context
def _to_filesystem_str(self, path):
'''Returns filesystem path as a str, if it wasn't already.
Used in selinux interactions because it cannot accept unicode
instances, and specifying complex args in a playbook leaves
you with unicode instances. This method currently assumes
that your filesystem encoding is UTF-8.
'''
if isinstance(path, unicode):
path = path.encode("utf-8")
return path
# If selinux fails to find a default, return an array of None
def selinux_default_context(self, path, mode=0):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.matchpathcon(self._to_filesystem_str(path), mode)
except OSError:
return context
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def selinux_context(self, path):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.lgetfilecon_raw(self._to_filesystem_str(path))
except OSError:
e = get_exception()
if e.errno == errno.ENOENT:
self.fail_json(path=path, msg='path %s does not exist' % path)
else:
self.fail_json(path=path, msg='failed to retrieve selinux context')
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def user_and_group(self, filename):
filename = os.path.expanduser(filename)
st = os.lstat(filename)
uid = st.st_uid
gid = st.st_gid
return (uid, gid)
def find_mount_point(self, path):
path = os.path.realpath(os.path.expanduser(os.path.expandvars(path)))
while not os.path.ismount(path):
path = os.path.dirname(path)
return path
def is_special_selinux_path(self, path):
"""
Returns a tuple containing (True, selinux_context) if the given path is on a
NFS or other 'special' fs mount point, otherwise the return will be (False, None).
"""
try:
f = open('/proc/mounts', 'r')
mount_data = f.readlines()
f.close()
except:
return (False, None)
path_mount_point = self.find_mount_point(path)
for line in mount_data:
(device, mount_point, fstype, options, rest) = line.split(' ', 4)
if path_mount_point == mount_point:
for fs in self._selinux_special_fs:
if fs in fstype:
special_context = self.selinux_context(path_mount_point)
return (True, special_context)
return (False, None)
def set_default_selinux_context(self, path, changed):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
context = self.selinux_default_context(path)
return self.set_context_if_different(path, context, False)
def set_context_if_different(self, path, context, changed, diff=None):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
cur_context = self.selinux_context(path)
new_context = list(cur_context)
# Iterate over the current context instead of the
# argument context, which may have selevel.
(is_special_se, sp_context) = self.is_special_selinux_path(path)
if is_special_se:
new_context = sp_context
else:
for i in range(len(cur_context)):
if len(context) > i:
if context[i] is not None and context[i] != cur_context[i]:
new_context[i] = context[i]
elif context[i] is None:
new_context[i] = cur_context[i]
if cur_context != new_context:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['secontext'] = cur_context
if 'after' not in diff:
diff['after'] = {}
diff['after']['secontext'] = new_context
try:
if self.check_mode:
return True
rc = selinux.lsetfilecon(self._to_filesystem_str(path),
str(':'.join(new_context)))
except OSError:
e = get_exception()
self.fail_json(path=path, msg='invalid selinux context: %s' % str(e), new_context=new_context, cur_context=cur_context, input_was=context)
if rc != 0:
self.fail_json(path=path, msg='set selinux context failed')
changed = True
return changed
def set_owner_if_different(self, path, owner, changed, diff=None):
path = os.path.expanduser(path)
if owner is None:
return changed
orig_uid, orig_gid = self.user_and_group(path)
try:
uid = int(owner)
except ValueError:
try:
uid = pwd.getpwnam(owner).pw_uid
except KeyError:
self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
if orig_uid != uid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['owner'] = orig_uid
if 'after' not in diff:
diff['after'] = {}
diff['after']['owner'] = uid
if self.check_mode:
return True
try:
os.lchown(path, uid, -1)
except OSError:
self.fail_json(path=path, msg='chown failed')
changed = True
return changed
def set_group_if_different(self, path, group, changed, diff=None):
path = os.path.expanduser(path)
if group is None:
return changed
orig_uid, orig_gid = self.user_and_group(path)
try:
gid = int(group)
except ValueError:
try:
gid = grp.getgrnam(group).gr_gid
except KeyError:
self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
if orig_gid != gid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['group'] = orig_gid
if 'after' not in diff:
diff['after'] = {}
diff['after']['group'] = gid
if self.check_mode:
return True
try:
os.lchown(path, -1, gid)
except OSError:
self.fail_json(path=path, msg='chgrp failed')
changed = True
return changed
def set_mode_if_different(self, path, mode, changed, diff=None):
path = os.path.expanduser(path)
path_stat = os.lstat(path)
if mode is None:
return changed
if not isinstance(mode, int):
try:
mode = int(mode, 8)
except Exception:
try:
mode = self._symbolic_mode_to_octal(path_stat, mode)
except Exception:
e = get_exception()
self.fail_json(path=path,
msg="mode must be in octal or symbolic form",
details=str(e))
if mode != stat.S_IMODE(mode):
            # prevent mode from having extra info or being an invalid long number
self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode)
prev_mode = stat.S_IMODE(path_stat.st_mode)
if prev_mode != mode:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['mode'] = oct(prev_mode)
if 'after' not in diff:
diff['after'] = {}
diff['after']['mode'] = oct(mode)
if self.check_mode:
return True
# FIXME: comparison against string above will cause this to be executed
# every time
try:
if hasattr(os, 'lchmod'):
os.lchmod(path, mode)
else:
if not os.path.islink(path):
os.chmod(path, mode)
else:
# Attempt to set the perms of the symlink but be
# careful not to change the perms of the underlying
# file while trying
underlying_stat = os.stat(path)
os.chmod(path, mode)
new_underlying_stat = os.stat(path)
if underlying_stat.st_mode != new_underlying_stat.st_mode:
os.chmod(path, stat.S_IMODE(underlying_stat.st_mode))
except OSError:
e = get_exception()
if os.path.islink(path) and e.errno == errno.EPERM: # Can't set mode on symbolic links
pass
elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links
pass
else:
raise e
except Exception:
e = get_exception()
self.fail_json(path=path, msg='chmod failed', details=str(e))
path_stat = os.lstat(path)
new_mode = stat.S_IMODE(path_stat.st_mode)
if new_mode != prev_mode:
changed = True
return changed
def _symbolic_mode_to_octal(self, path_stat, symbolic_mode):
new_mode = stat.S_IMODE(path_stat.st_mode)
mode_re = re.compile(r'^(?P<users>[ugoa]+)(?P<operator>[-+=])(?P<perms>[rwxXst-]*|[ugo])$')
for mode in symbolic_mode.split(','):
match = mode_re.match(mode)
if match:
users = match.group('users')
operator = match.group('operator')
perms = match.group('perms')
if users == 'a':
users = 'ugo'
for user in users:
mode_to_apply = self._get_octal_mode_from_symbolic_perms(path_stat, user, perms)
new_mode = self._apply_operation_to_mode(user, operator, mode_to_apply, new_mode)
else:
raise ValueError("bad symbolic permission for mode: %s" % mode)
return new_mode
def _apply_operation_to_mode(self, user, operator, mode_to_apply, current_mode):
if operator == '=':
if user == 'u': mask = stat.S_IRWXU | stat.S_ISUID
elif user == 'g': mask = stat.S_IRWXG | stat.S_ISGID
elif user == 'o': mask = stat.S_IRWXO | stat.S_ISVTX
# mask out u, g, or o permissions from current_mode and apply new permissions
inverse_mask = mask ^ PERM_BITS
new_mode = (current_mode & inverse_mask) | mode_to_apply
elif operator == '+':
new_mode = current_mode | mode_to_apply
elif operator == '-':
new_mode = current_mode - (current_mode & mode_to_apply)
return new_mode
def _get_octal_mode_from_symbolic_perms(self, path_stat, user, perms):
prev_mode = stat.S_IMODE(path_stat.st_mode)
is_directory = stat.S_ISDIR(path_stat.st_mode)
has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
apply_X_permission = is_directory or has_x_permissions
# Permission bits constants documented at:
# http://docs.python.org/2/library/stat.html#stat.S_ISUID
if apply_X_permission:
X_perms = {
'u': {'X': stat.S_IXUSR},
'g': {'X': stat.S_IXGRP},
'o': {'X': stat.S_IXOTH}
}
else:
X_perms = {
'u': {'X': 0},
'g': {'X': 0},
'o': {'X': 0}
}
user_perms_to_modes = {
'u': {
'r': stat.S_IRUSR,
'w': stat.S_IWUSR,
'x': stat.S_IXUSR,
's': stat.S_ISUID,
't': 0,
'u': prev_mode & stat.S_IRWXU,
'g': (prev_mode & stat.S_IRWXG) << 3,
'o': (prev_mode & stat.S_IRWXO) << 6 },
'g': {
'r': stat.S_IRGRP,
'w': stat.S_IWGRP,
'x': stat.S_IXGRP,
's': stat.S_ISGID,
't': 0,
'u': (prev_mode & stat.S_IRWXU) >> 3,
'g': prev_mode & stat.S_IRWXG,
'o': (prev_mode & stat.S_IRWXO) << 3 },
'o': {
'r': stat.S_IROTH,
'w': stat.S_IWOTH,
'x': stat.S_IXOTH,
's': 0,
't': stat.S_ISVTX,
'u': (prev_mode & stat.S_IRWXU) >> 6,
'g': (prev_mode & stat.S_IRWXG) >> 3,
'o': prev_mode & stat.S_IRWXO }
}
# Insert X_perms into user_perms_to_modes
for key, value in X_perms.items():
user_perms_to_modes[key].update(value)
or_reduce = lambda mode, perm: mode | user_perms_to_modes[user][perm]
return reduce(or_reduce, perms, 0)
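    # Worked example (illustrative, not part of the original module): applying the
    # symbolic mode "u+x,g-w" to a path whose current mode is 0664:
    #   u+x: mode_to_apply = S_IXUSR (0100); '+' gives 0664 | 0100 = 0764
    #   g-w: mode_to_apply = S_IWGRP (0020); '-' gives 0764 - (0764 & 0020) = 0744
    # so _symbolic_mode_to_octal() returns the octal value 0744.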
def set_fs_attributes_if_different(self, file_args, changed, diff=None):
# set modes owners and context as needed
changed = self.set_context_if_different(
file_args['path'], file_args['secontext'], changed, diff
)
changed = self.set_owner_if_different(
file_args['path'], file_args['owner'], changed, diff
)
changed = self.set_group_if_different(
file_args['path'], file_args['group'], changed, diff
)
changed = self.set_mode_if_different(
file_args['path'], file_args['mode'], changed, diff
)
return changed
def set_directory_attributes_if_different(self, file_args, changed, diff=None):
return self.set_fs_attributes_if_different(file_args, changed, diff)
def set_file_attributes_if_different(self, file_args, changed, diff=None):
return self.set_fs_attributes_if_different(file_args, changed, diff)
def add_path_info(self, kwargs):
'''
for results that are files, supplement the info about the file
in the return path with stats about the file path.
'''
path = kwargs.get('path', kwargs.get('dest', None))
if path is None:
return kwargs
if os.path.exists(path):
(uid, gid) = self.user_and_group(path)
kwargs['uid'] = uid
kwargs['gid'] = gid
try:
user = pwd.getpwuid(uid)[0]
except KeyError:
user = str(uid)
try:
group = grp.getgrgid(gid)[0]
except KeyError:
group = str(gid)
kwargs['owner'] = user
kwargs['group'] = group
st = os.lstat(path)
kwargs['mode'] = oct(stat.S_IMODE(st[stat.ST_MODE]))
# secontext not yet supported
if os.path.islink(path):
kwargs['state'] = 'link'
elif os.path.isdir(path):
kwargs['state'] = 'directory'
elif os.stat(path).st_nlink > 1:
kwargs['state'] = 'hard'
else:
kwargs['state'] = 'file'
if HAVE_SELINUX and self.selinux_enabled():
kwargs['secontext'] = ':'.join(self.selinux_context(path))
kwargs['size'] = st[stat.ST_SIZE]
else:
kwargs['state'] = 'absent'
return kwargs
def _check_locale(self):
'''
Uses the locale module to test the currently set locale
(per the LANG and LC_CTYPE environment settings)
'''
try:
# setting the locale to '' uses the default locale
# as it would be returned by locale.getdefaultlocale()
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
# fallback to the 'C' locale, which may cause unicode
# issues but is preferable to simply failing because
# of an unknown locale
locale.setlocale(locale.LC_ALL, 'C')
os.environ['LANG'] = 'C'
os.environ['LC_ALL'] = 'C'
os.environ['LC_MESSAGES'] = 'C'
except Exception:
e = get_exception()
self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" % e)
def _handle_aliases(self):
# this uses exceptions as it happens before we can safely call fail_json
aliases_results = {} #alias:canon
for (k,v) in self.argument_spec.items():
self._legal_inputs.append(k)
aliases = v.get('aliases', None)
default = v.get('default', None)
required = v.get('required', False)
if default is not None and required:
# not alias specific but this is a good place to check this
raise Exception("internal error: required and default are mutually exclusive for %s" % k)
if aliases is None:
continue
if type(aliases) != list:
raise Exception('internal error: aliases must be a list')
for alias in aliases:
self._legal_inputs.append(alias)
aliases_results[alias] = k
if alias in self.params:
self.params[k] = self.params[alias]
return aliases_results
def _check_arguments(self, check_invalid_arguments):
self._syslog_facility = 'LOG_USER'
for (k,v) in list(self.params.items()):
if k == '_ansible_check_mode' and v:
self.check_mode = True
elif k == '_ansible_no_log':
self.no_log = self.boolean(v)
elif k == '_ansible_debug':
self._debug = self.boolean(v)
elif k == '_ansible_diff':
self._diff = self.boolean(v)
elif k == '_ansible_verbosity':
self._verbosity = v
elif k == '_ansible_selinux_special_fs':
self._selinux_special_fs = v
elif k == '_ansible_syslog_facility':
self._syslog_facility = v
elif k == '_ansible_version':
self.ansible_version = v
elif k == '_ansible_module_name':
self._name = v
elif check_invalid_arguments and k not in self._legal_inputs:
self.fail_json(msg="unsupported parameter for module: %s" % k)
#clean up internal params:
if k.startswith('_ansible_'):
del self.params[k]
if self.check_mode and not self.supports_check_mode:
self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
def _count_terms(self, check):
count = 0
for term in check:
if term in self.params:
count += 1
return count
def _check_mutually_exclusive(self, spec):
if spec is None:
return
for check in spec:
count = self._count_terms(check)
if count > 1:
self.fail_json(msg="parameters are mutually exclusive: %s" % (check,))
def _check_required_one_of(self, spec):
if spec is None:
return
for check in spec:
count = self._count_terms(check)
if count == 0:
self.fail_json(msg="one of the following is required: %s" % ','.join(check))
def _check_required_together(self, spec):
if spec is None:
return
for check in spec:
counts = [ self._count_terms([field]) for field in check ]
non_zero = [ c for c in counts if c > 0 ]
if len(non_zero) > 0:
if 0 in counts:
self.fail_json(msg="parameters are required together: %s" % (check,))
def _check_required_arguments(self):
''' ensure all required arguments are present '''
missing = []
for (k,v) in self.argument_spec.items():
required = v.get('required', False)
if required and k not in self.params:
missing.append(k)
if len(missing) > 0:
self.fail_json(msg="missing required arguments: %s" % ",".join(missing))
def _check_required_if(self, spec):
''' ensure that parameters which conditionally required are present '''
if spec is None:
return
for (key, val, requirements) in spec:
missing = []
if key in self.params and self.params[key] == val:
for check in requirements:
count = self._count_terms((check,))
if count == 0:
missing.append(check)
if len(missing) > 0:
self.fail_json(msg="%s is %s but the following are missing: %s" % (key, val, ','.join(missing)))
def _check_argument_values(self):
''' ensure all arguments have the requested values, and there are no stray arguments '''
for (k,v) in self.argument_spec.items():
choices = v.get('choices',None)
if choices is None:
continue
if isinstance(choices, SEQUENCETYPE):
if k in self.params:
if self.params[k] not in choices:
choices_str=",".join([str(c) for c in choices])
msg="value of %s must be one of: %s, got: %s" % (k, choices_str, self.params[k])
self.fail_json(msg=msg)
else:
self.fail_json(msg="internal error: choices for argument %s are not iterable: %s" % (k, choices))
def safe_eval(self, str, locals=None, include_exceptions=False):
# do not allow method calls to modules
if not isinstance(str, basestring):
# already templated to a datastructure, perhaps?
if include_exceptions:
return (str, None)
return str
if re.search(r'\w\.\w+\(', str):
if include_exceptions:
return (str, None)
return str
# do not allow imports
if re.search(r'import \w+', str):
if include_exceptions:
return (str, None)
return str
try:
result = literal_eval(str)
if include_exceptions:
return (result, None)
else:
return result
except Exception:
e = get_exception()
if include_exceptions:
return (str, e)
return str
def _check_type_str(self, value):
if isinstance(value, basestring):
return value
# Note: This could throw a unicode error if value's __str__() method
# returns non-ascii. Have to port utils.to_bytes() if that happens
return str(value)
def _check_type_list(self, value):
if isinstance(value, list):
return value
if isinstance(value, basestring):
return value.split(",")
elif isinstance(value, int) or isinstance(value, float):
return [ str(value) ]
raise TypeError('%s cannot be converted to a list' % type(value))
def _check_type_dict(self, value):
if isinstance(value, dict):
return value
if isinstance(value, basestring):
if value.startswith("{"):
try:
return json.loads(value)
except:
(result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
if exc is not None:
raise TypeError('unable to evaluate string as dictionary')
return result
elif '=' in value:
fields = []
field_buffer = []
in_quote = False
in_escape = False
for c in value.strip():
if in_escape:
field_buffer.append(c)
in_escape = False
elif c == '\\':
in_escape = True
elif not in_quote and c in ('\'', '"'):
in_quote = c
elif in_quote and in_quote == c:
in_quote = False
elif not in_quote and c in (',', ' '):
field = ''.join(field_buffer)
if field:
fields.append(field)
field_buffer = []
else:
field_buffer.append(c)
field = ''.join(field_buffer)
if field:
fields.append(field)
return dict(x.split("=", 1) for x in fields)
else:
raise TypeError("dictionary requested, could not parse JSON or key=value")
raise TypeError('%s cannot be converted to a dict' % type(value))
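    # Illustrative note (not part of the original module): besides real dicts and
    # JSON text, _check_type_dict() accepts a key=value string, e.g.
    #   "name=web port=8080"  ->  {'name': 'web', 'port': '8080'}
    # splitting on spaces/commas while honouring quotes and backslash escapes.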
def _check_type_bool(self, value):
if isinstance(value, bool):
return value
if isinstance(value, basestring) or isinstance(value, int):
return self.boolean(value)
raise TypeError('%s cannot be converted to a bool' % type(value))
def _check_type_int(self, value):
if isinstance(value, int):
return value
if isinstance(value, basestring):
return int(value)
raise TypeError('%s cannot be converted to an int' % type(value))
def _check_type_float(self, value):
if isinstance(value, float):
return value
if isinstance(value, basestring):
return float(value)
raise TypeError('%s cannot be converted to a float' % type(value))
def _check_type_path(self, value):
value = self._check_type_str(value)
return os.path.expanduser(os.path.expandvars(value))
def _check_type_jsonarg(self, value):
# Return a jsonified string. Sometimes the controller turns a json
# string into a dict/list so transform it back into json here
if isinstance(value, (unicode, bytes)):
return value.strip()
else:
if isinstance(value, (list, tuple, dict)):
return json.dumps(value)
raise TypeError('%s cannot be converted to a json string' % type(value))
def _check_type_raw(self, value):
return value
def _check_argument_types(self):
''' ensure all arguments have the requested type '''
for (k, v) in self.argument_spec.items():
wanted = v.get('type', None)
if k not in self.params:
continue
if wanted is None:
# Mostly we want to default to str.
# For values set to None explicitly, return None instead as
# that allows a user to unset a parameter
if self.params[k] is None:
continue
wanted = 'str'
value = self.params[k]
if value is None:
continue
try:
type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted]
except KeyError:
self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
try:
self.params[k] = type_checker(value)
except (TypeError, ValueError):
self.fail_json(msg="argument %s is of type %s and we were unable to convert to %s" % (k, type(value), wanted))
def _set_defaults(self, pre=True):
for (k,v) in self.argument_spec.items():
default = v.get('default', None)
if pre == True:
# this prevents setting defaults on required items
if default is not None and k not in self.params:
self.params[k] = default
else:
# make sure things without a default still get set None
if k not in self.params:
self.params[k] = default
def _set_fallbacks(self):
for k,v in self.argument_spec.items():
fallback = v.get('fallback', (None,))
fallback_strategy = fallback[0]
fallback_args = []
fallback_kwargs = {}
if k not in self.params and fallback_strategy is not None:
for item in fallback[1:]:
if isinstance(item, dict):
fallback_kwargs = item
else:
fallback_args = item
try:
self.params[k] = fallback_strategy(*fallback_args, **fallback_kwargs)
except AnsibleFallbackNotFound:
continue
def _load_params(self):
''' read the input and set the params attribute.
This method is for backwards compatibility. The guts of the function
were moved out in 2.1 so that custom modules could read the parameters.
'''
# debug overrides to read args from file or cmdline
self.params = _load_params()
def _log_to_syslog(self, msg):
if HAS_SYSLOG:
module = 'ansible-%s' % self._name
facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
syslog.openlog(str(module), 0, facility)
syslog.syslog(syslog.LOG_INFO, msg)
def debug(self, msg):
if self._debug:
self.log(msg)
def log(self, msg, log_args=None):
if not self.no_log:
if log_args is None:
log_args = dict()
module = 'ansible-%s' % self._name
if isinstance(module, bytes):
module = module.decode('utf-8', 'replace')
# 6655 - allow for accented characters
if not isinstance(msg, (bytes, unicode)):
raise TypeError("msg should be a string (got %s)" % type(msg))
# We want journal to always take text type
# syslog takes bytes on py2, text type on py3
if isinstance(msg, bytes):
journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values)
else:
# TODO: surrogateescape is a danger here on Py3
journal_msg = remove_values(msg, self.no_log_values)
if PY3:
syslog_msg = journal_msg
else:
syslog_msg = journal_msg.encode('utf-8', 'replace')
if has_journal:
journal_args = [("MODULE", os.path.basename(__file__))]
for arg in log_args:
journal_args.append((arg.upper(), str(log_args[arg])))
try:
journal.send(u"%s %s" % (module, journal_msg), **dict(journal_args))
except IOError:
# fall back to syslog since logging to journal failed
self._log_to_syslog(syslog_msg)
else:
self._log_to_syslog(syslog_msg)
def _log_invocation(self):
''' log that ansible ran the module '''
# TODO: generalize a separate log function and make log_invocation use it
# Sanitize possible password argument when logging.
log_args = dict()
passwd_keys = ['password', 'login_password']
for param in self.params:
canon = self.aliases.get(param, param)
arg_opts = self.argument_spec.get(canon, {})
no_log = arg_opts.get('no_log', False)
if self.boolean(no_log):
log_args[param] = 'NOT_LOGGING_PARAMETER'
elif param in passwd_keys:
log_args[param] = 'NOT_LOGGING_PASSWORD'
else:
param_val = self.params[param]
if not isinstance(param_val, basestring):
param_val = str(param_val)
elif isinstance(param_val, unicode):
param_val = param_val.encode('utf-8')
log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values)
msg = []
for arg in log_args:
arg_val = log_args[arg]
if not isinstance(arg_val, basestring):
arg_val = str(arg_val)
elif isinstance(arg_val, unicode):
arg_val = arg_val.encode('utf-8')
msg.append('%s=%s' % (arg, arg_val))
if msg:
msg = 'Invoked with %s' % ' '.join(msg)
else:
msg = 'Invoked'
self.log(msg, log_args=log_args)
def _set_cwd(self):
try:
cwd = os.getcwd()
if not os.access(cwd, os.F_OK|os.R_OK):
raise
return cwd
except:
# we don't have access to the cwd, probably because of sudo.
# Try and move to a neutral location to prevent errors
for cwd in [os.path.expandvars('$HOME'), tempfile.gettempdir()]:
try:
if os.access(cwd, os.F_OK|os.R_OK):
os.chdir(cwd)
return cwd
except:
pass
# we won't error here, as it may *not* be a problem,
# and we don't want to break modules unnecessarily
return None
def get_bin_path(self, arg, required=False, opt_dirs=[]):
'''
find system executable in PATH.
Optional arguments:
- required: if executable is not found and required is true, fail_json
- opt_dirs: optional list of directories to search in addition to PATH
if found return full path; otherwise return None
'''
sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
paths = []
for d in opt_dirs:
if d is not None and os.path.exists(d):
paths.append(d)
paths += os.environ.get('PATH', '').split(os.pathsep)
bin_path = None
# mangle PATH to include /sbin dirs
for p in sbin_paths:
if p not in paths and os.path.exists(p):
paths.append(p)
for d in paths:
if not d:
continue
path = os.path.join(d, arg)
if os.path.exists(path) and is_executable(path):
bin_path = path
break
if required and bin_path is None:
self.fail_json(msg='Failed to find required executable %s' % arg)
return bin_path
def boolean(self, arg):
''' return a bool for the arg '''
if arg is None or type(arg) == bool:
return arg
if isinstance(arg, basestring):
arg = arg.lower()
if arg in BOOLEANS_TRUE:
return True
elif arg in BOOLEANS_FALSE:
return False
else:
self.fail_json(msg='Boolean %s not in either boolean list' % arg)
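# Illustrative behaviour (BOOLEANS_TRUE/BOOLEANS_FALSE are module-level lists
# defined earlier in this file, commonly holding values such as 'yes'/'no',
# 'true'/'false', 'on'/'off' and '1'/'0'):
#
#   module.boolean('Yes')    # -> True
#   module.boolean('off')    # -> False
#   module.boolean(None)     # -> None (passed through unchanged)
#   module.boolean('maybe')  # -> fail_json(), value is in neither list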
def jsonify(self, data):
for encoding in ("utf-8", "latin-1"):
try:
return json.dumps(data, encoding=encoding)
# Old systems using an old simplejson module do not support the encoding keyword.
except TypeError:
try:
new_data = json_dict_bytes_to_unicode(data, encoding=encoding)
except UnicodeDecodeError:
continue
return json.dumps(new_data)
except UnicodeDecodeError:
continue
self.fail_json(msg='Invalid unicode encoding encountered')
def from_json(self, data):
return json.loads(data)
def add_cleanup_file(self, path):
if path not in self.cleanup_files:
self.cleanup_files.append(path)
def do_cleanup_files(self):
for path in self.cleanup_files:
self.cleanup(path)
def exit_json(self, **kwargs):
''' return from the module, without error '''
self.add_path_info(kwargs)
if 'changed' not in kwargs:
kwargs['changed'] = False
if 'invocation' not in kwargs:
kwargs['invocation'] = {'module_args': self.params}
kwargs = remove_values(kwargs, self.no_log_values)
self.do_cleanup_files()
print('\n%s' % self.jsonify(kwargs))
sys.exit(0)
def fail_json(self, **kwargs):
''' return from the module, with an error message '''
self.add_path_info(kwargs)
assert 'msg' in kwargs, "implementation error -- msg to explain the error is required"
kwargs['failed'] = True
if 'invocation' not in kwargs:
kwargs['invocation'] = {'module_args': self.params}
kwargs = remove_values(kwargs, self.no_log_values)
self.do_cleanup_files()
print('\n%s' % self.jsonify(kwargs))
sys.exit(1)
def fail_on_missing_params(self, required_params=None):
''' This is for checking for required params when we can not check via argspec because we
need more information than is simply given in the argspec.
'''
if not required_params:
return
missing_params = []
for required_param in required_params:
if not self.params.get(required_param):
missing_params.append(required_param)
if missing_params:
self.fail_json(msg="missing required arguments: %s" % ','.join(missing_params))
def digest_from_file(self, filename, algorithm):
''' Return hex digest of local file for a digest_method specified by name, or None if file is not present. '''
if not os.path.exists(filename):
return None
if os.path.isdir(filename):
self.fail_json(msg="attempted to take checksum of directory: %s" % filename)
# preserve old behaviour where the third parameter was a hash algorithm object
if hasattr(algorithm, 'hexdigest'):
digest_method = algorithm
else:
try:
digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]()
except KeyError:
self.fail_json(msg="Could not hash file '%s' with algorithm '%s'. Available algorithms: %s" %
(filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))
blocksize = 64 * 1024
infile = open(filename, 'rb')
block = infile.read(blocksize)
while block:
digest_method.update(block)
block = infile.read(blocksize)
infile.close()
return digest_method.hexdigest()
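# Illustrative usage (hypothetical path; assumes hashlib is imported earlier
# in this file): the algorithm may be given by name or, for backwards
# compatibility, as an already-constructed hash object:
#
#   module.digest_from_file('/etc/hosts', 'sha256')
#   module.digest_from_file('/etc/hosts', hashlib.sha1())
#
# Both return the hex digest, or None when the file does not exist.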
def md5(self, filename):
''' Return MD5 hex digest of local file using digest_from_file().
Do not use this function unless you have no other choice for:
1) Optional backwards compatibility
2) Compatibility with a third party protocol
This function will not work on systems complying with FIPS-140-2.
Most uses of this function can use the module.sha1 function instead.
'''
if 'md5' not in AVAILABLE_HASH_ALGORITHMS:
raise ValueError('MD5 not available. Possibly running in FIPS mode')
return self.digest_from_file(filename, 'md5')
def sha1(self, filename):
''' Return SHA1 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha1')
def sha256(self, filename):
''' Return SHA-256 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha256')
def backup_local(self, fn):
'''make a date-marked backup of the specified file, return True or False on success or failure'''
backupdest = ''
if os.path.exists(fn):
# backups named basename.PID.YYYY-MM-DD@HH:MM:SS~
ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
backupdest = '%s.%s.%s' % (fn, os.getpid(), ext)
try:
shutil.copy2(fn, backupdest)
except (shutil.Error, IOError):
e = get_exception()
self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, e))
return backupdest
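# Illustrative result (hypothetical file): backing up /etc/motd from pid 4242
# on 2021-03-01 09:30:05 copies it to '/etc/motd.4242.2021-03-01@09:30:05~'
# and returns that path; '' is returned when the source file does not exist.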
def cleanup(self, tmpfile):
if os.path.exists(tmpfile):
try:
os.unlink(tmpfile)
except OSError:
e = get_exception()
sys.stderr.write("could not cleanup %s: %s" % (tmpfile, e))
def atomic_move(self, src, dest, unsafe_writes=False):
'''atomically move src to dest, copying attributes from dest, returns true on success
it uses os.rename to ensure this as it is an atomic operation, rest of the function is
to work around limitations, corner cases and ensure selinux context is saved if possible'''
context = None
dest_stat = None
if os.path.exists(dest):
try:
dest_stat = os.stat(dest)
os.chmod(src, dest_stat.st_mode & PERM_BITS)
os.chown(src, dest_stat.st_uid, dest_stat.st_gid)
except OSError:
e = get_exception()
if e.errno != errno.EPERM:
raise
if self.selinux_enabled():
context = self.selinux_context(dest)
else:
if self.selinux_enabled():
context = self.selinux_default_context(dest)
creating = not os.path.exists(dest)
try:
login_name = os.getlogin()
except OSError:
# not having a tty can cause the above to fail, so
# just get the LOGNAME environment variable instead
login_name = os.environ.get('LOGNAME', None)
# if the original login_name doesn't match the currently
# logged-in user, or if the SUDO_USER environment variable
# is set, then this user has switched their credentials
switched_user = login_name and login_name != pwd.getpwuid(os.getuid())[0] or os.environ.get('SUDO_USER')
try:
# Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
os.rename(src, dest)
except (IOError, OSError):
e = get_exception()
if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY]:
# only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied)
# and 26 (text file busy) which happens on vagrant synced folders and other 'exotic' non posix file systems
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e))
else:
dest_dir = os.path.dirname(dest)
dest_file = os.path.basename(dest)
try:
tmp_dest = tempfile.NamedTemporaryFile(
prefix=".ansible_tmp", dir=dest_dir, suffix=dest_file)
except (OSError, IOError):
e = get_exception()
self.fail_json(msg='The destination directory (%s) is not writable by the current user. Error was: %s' % (dest_dir, e))
try: # leaves tmp file behind when sudo and not root
if switched_user and os.getuid() != 0:
# cleanup will happen by 'rm' of tempdir
# copy2 will preserve some metadata
shutil.copy2(src, tmp_dest.name)
else:
shutil.move(src, tmp_dest.name)
if self.selinux_enabled():
self.set_context_if_different(
tmp_dest.name, context, False)
try:
tmp_stat = os.stat(tmp_dest.name)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(tmp_dest.name, dest_stat.st_uid, dest_stat.st_gid)
except OSError:
e = get_exception()
if e.errno != errno.EPERM:
raise
os.rename(tmp_dest.name, dest)
except (shutil.Error, OSError, IOError):
e = get_exception()
# sadly there are some situations where we cannot ensure atomicity, but only if
# the user insists and we get the appropriate error we update the file unsafely
if unsafe_writes and e.errno == errno.EBUSY:
#TODO: issue warning that this is an unsafe operation, but doing it because the user insists
out_dest = in_src = None
try:
try:
out_dest = open(dest, 'wb')
in_src = open(src, 'rb')
shutil.copyfileobj(in_src, out_dest)
finally: # assuring closed files in 2.4 compatible way
if out_dest:
out_dest.close()
if in_src:
in_src.close()
except (shutil.Error, OSError, IOError):
e = get_exception()
self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, e))
else:
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e))
self.cleanup(tmp_dest.name)
if creating:
# make sure the file has the correct permissions
# based on the current value of umask
umask = os.umask(0)
"""
Utility classes and functions to handle Virtual Machine creation using libvirt.
:copyright: 2011 Red Hat Inc.
"""
from __future__ import division
import time
import string
import os
import logging
import fcntl
import re
import shutil
import tempfile
import platform
import aexpect
from avocado.utils import process
from avocado.utils import crypto
from avocado.core import exceptions
from virttest import error_context
from virttest import utils_misc
from virttest import virt_vm
from virttest import storage
from virttest import remote
from virttest import virsh
from virttest import libvirt_xml
from virttest import data_dir
from virttest import xml_utils
from virttest import utils_selinux
from virttest import test_setup
from virttest import utils_package
from virttest.compat_52lts import results_stdout_52lts, results_stderr_52lts, decode_to_text
def normalize_connect_uri(connect_uri):
"""
Processes connect_uri Cartesian into something virsh can use
:param connect_uri: Cartesian Params setting
:return: Normalized connect_uri
"""
if connect_uri == "default":
result = virsh.canonical_uri()
else:
result = virsh.canonical_uri(uri=connect_uri)
if not result:
raise ValueError("Normalizing connect_uri '%s' failed, is libvirt "
"running?" % connect_uri)
return result
def complete_uri(ip_address, protocol=None, port=None):
"""
Return a complete URI with the combination of ip_address and local uri.
It is useful when you need to connect remote hypervisor.
:param ip_address: an ip address or a hostname
:param protocol: protocol for uri eg: tcp, spice etc.
:param port: port for the protocol
:return: a complete uri
"""
if protocol and port:
complete_uri = "%s://%s:%s" % (protocol, ip_address, port)
else:
# Allow to raise CmdError if canonical_uri is failed
uri = virsh.canonical_uri(ignore_status=False)
driver = uri.split(":")[0]
# The libvirtd daemon's mode(system or session on qemu)
daemon_mode = uri.split("/")[-1]
complete_uri = "%s+ssh://%s/%s" % (driver, ip_address, daemon_mode)
return complete_uri
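# Illustrative results (hypothetical addresses), assuming the local canonical
# URI is qemu:///system:
#
#   complete_uri('192.168.122.10')                   -> 'qemu+ssh://192.168.122.10/system'
#   complete_uri('remote.example.com', 'tcp', 16509) -> 'tcp://remote.example.com:16509'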
def get_uri_with_transport(uri_type='qemu', transport="", dest_ip=""):
"""
Return a URI to connect driver on dest with a specified transport.
:param uri_type: The hypervisor driver type used to build the base URI (e.g. 'qemu', 'lxc', 'xen').
:param transport: The transport type connect to dest.
:param dest_ip: The ip of destination.
"""
_type2uri_ = {'qemu': "qemu:///system",
'qemu_system': "qemu:///system",
'qemu_session': "qemu:///session",
'lxc': "lxc:///",
'xen': "xen:///",
'esx': "esx:///"}
try:
origin_uri = _type2uri_[uri_type]
except KeyError:
raise ValueError("Param uri_type = %s is not supported." % (uri_type))
# For example:
# ("qemu:///system")-->("qemu", "system")
# ("lxc:///")-->("lxc", "")
origin_uri_elems = origin_uri.split(":///")
transport_uri_driver = origin_uri_elems[0]
transport_uri_dest = origin_uri_elems[-1]
if transport:
transport_uri_driver = ("%s+%s" % (transport_uri_driver, transport))
transport_uri_dest = ("://%s/%s" % (dest_ip, transport_uri_dest))
return ("%s%s" % (transport_uri_driver, transport_uri_dest))
class Monitor(object):
"""
This class handles qemu monitor commands from libvirt VM object
TODO: other methods supported from qemu_monitor have to be included
but still vm.monitor.command(cmd) can serve the purpose
"""
def __init__(self, name, protocol="--hmp"):
"""
Initialize the object and set a few attributes.
:param name: The name of the VM
:param protocol: qemu monitor protocol
"""
self.name = name
self.protocol = protocol
def command(self, cmd, **dargs):
"""
Interface to execute qemu command from libvirt VM
:param cmd: qemu monitor command to execute
:param dargs: standardized virsh function API keywords
:return: standard output from monitor command executed
"""
result = virsh.qemu_monitor_command(self.name, cmd,
options=self.protocol, **dargs)
if result.exit_status != 0:
raise exceptions.TestError("Failed to execute monitor cmd %s: %s"
% (cmd, results_stderr_52lts(result)))
return results_stdout_52lts(result)
def system_powerdown(self):
"""
Perform powerdown of guest using qemu monitor
"""
cmd = "system_powerdown"
return self.command(cmd, debug=True)
def get_status(self):
"""
Retrieve VM status information using qemu monitor
"""
cmd = "info status"
return self.command(cmd, debug=True)
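# Illustrative usage (hypothetical VM name): every VM instance below exposes
# a Monitor object as vm.monitor, so HMP commands go through
# 'virsh qemu-monitor-command' instead of a raw monitor socket:
#
#   vm = VM('avocado-vt-vm1', params, root_dir, address_cache)
#   vm.monitor.command('info block')
#   vm.monitor.system_powerdown()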
class VM(virt_vm.BaseVM):
"""
This class handles all basic VM operations for libvirt.
"""
def __init__(self, name, params, root_dir, address_cache, state=None):
"""
Initialize the object and set a few attributes.
:param name: The name of the object
:param params: A dict containing VM params
(see method make_create_command for a full description)
:param root_dir: Base directory for relative filenames
:param address_cache: A dict that maps MAC addresses to IP addresses
:param state: If provided, use this as self.__dict__
"""
if state:
self.__dict__ = state
else:
self.process = None
self.serial_ports = []
self.serial_console_log = None
self.redirs = {}
self.vnc_port = None
self.vnc_autoport = True
self.pci_assignable = None
self.netdev_id = []
self.device_id = []
self.pci_devices = []
self.uuid = None
self.remote_sessions = []
self.spice_port = 8000
self.name = name
self.params = params
self.root_dir = root_dir
self.address_cache = address_cache
self.vnclisten = "0.0.0.0"
self.connect_uri = normalize_connect_uri(params.get("connect_uri",
"default"))
self.driver_type = virsh.driver(uri=self.connect_uri)
self.params['driver_type_' + self.name] = self.driver_type
self.monitor = Monitor(self.name)
# virtnet init depends on vm_type/driver_type being set w/in params
super(VM, self).__init__(name, params)
logging.info("Libvirt VM '%s', driver '%s', uri '%s'",
self.name, self.driver_type, self.connect_uri)
def is_lxc(self):
"""
Return True if VM is linux container.
"""
return (self.connect_uri and self.connect_uri.count("lxc"))
def is_qemu(self):
"""
Return True if VM is a qemu guest.
"""
return (self.connect_uri and self.connect_uri.count("qemu"))
def is_xen(self):
"""
Return True if VM is a xen guest.
"""
return (self.connect_uri and self.connect_uri.count("xen"))
def is_esx(self):
"""
Return True if VM is a esx guest.
"""
return (self.connect_uri and self.connect_uri.count("esx"))
def verify_alive(self):
"""
Make sure the VM is alive.
:raise VMDeadError: If the VM is dead
"""
if not self.is_alive():
raise virt_vm.VMDeadError("Domain %s is inactive" % self.name,
self.state())
def is_alive(self):
"""
Return True if VM is alive.
"""
return virsh.is_alive(self.name, uri=self.connect_uri)
def is_dead(self):
"""
Return True if VM is dead.
"""
return virsh.is_dead(self.name, uri=self.connect_uri)
def is_paused(self):
"""
Return True if VM is paused.
"""
return (self.state() == "paused")
def is_persistent(self):
"""
Return True if VM is persistent.
"""
try:
result = virsh.dominfo(self.name, uri=self.connect_uri)
dominfo = results_stdout_52lts(result).strip()
return bool(re.search(r"^Persistent:\s+[Yy]es", dominfo,
re.MULTILINE))
except process.CmdError:
return False
def is_autostart(self):
"""
Return True if VM is autostart.
"""
try:
result = virsh.dominfo(self.name, uri=self.connect_uri)
dominfo = results_stdout_52lts(result).strip()
return bool(re.search(r"^Autostart:\s+enable", dominfo,
re.MULTILINE))
except process.CmdError:
return False
def exists(self):
"""
Return True if VM exists.
"""
return virsh.domain_exists(self.name, uri=self.connect_uri)
def undefine(self, options=None):
"""
Undefine the VM.
"""
try:
virsh.undefine(self.name, options=options, uri=self.connect_uri,
ignore_status=False)
except process.CmdError as detail:
logging.error("Undefined VM %s failed:\n%s", self.name, detail)
return False
return True
def define(self, xml_file):
"""
Define the VM.
"""
if not os.path.exists(xml_file):
logging.error("File %s not found." % xml_file)
return False
try:
virsh.define(xml_file, uri=self.connect_uri,
ignore_status=False)
except process.CmdError as detail:
logging.error("Defined VM from %s failed:\n%s", xml_file, detail)
return False
return True
def state(self):
"""
Return domain state.
"""
result = virsh.domstate(self.name, uri=self.connect_uri)
return results_stdout_52lts(result).strip()
def get_id(self):
"""
Return VM's ID.
"""
result = virsh.domid(self.name, uri=self.connect_uri)
return results_stdout_52lts(result).strip()
def get_xml(self):
"""
Return VM's xml file.
"""
result = virsh.dumpxml(self.name, uri=self.connect_uri)
return results_stdout_52lts(result).strip()
def backup_xml(self, active=False):
"""
Backup the guest's xmlfile.
"""
# Since backup_xml() is not a function for testing,
# we have to handle the exception here.
try:
xml_file = tempfile.mktemp(dir=data_dir.get_tmp_dir())
if active:
extra = ""
else:
extra = "--inactive"
virsh.dumpxml(self.name, extra=extra,
to_file=xml_file, uri=self.connect_uri)
return xml_file
except Exception as detail:
if os.path.exists(xml_file):
os.remove(xml_file)
logging.error("Failed to backup xml file:\n%s", detail)
return ""
def clone(self, name=None, params=None, root_dir=None, address_cache=None,
copy_state=False):
"""
Return a clone of the VM object with optionally modified parameters.
The clone is initially not alive and needs to be started using create().
Any parameters not passed to this function are copied from the source
VM.
:param name: Optional new VM name
:param params: Optional new VM creation parameters
:param root_dir: Optional new base directory for relative filenames
:param address_cache: A dict that maps MAC addresses to IP addresses
:param copy_state: If True, copy the original VM's state to the clone.
Mainly useful for make_create_command().
"""
if name is None:
name = self.name
if params is None:
params = self.params.copy()
if root_dir is None:
root_dir = self.root_dir
if address_cache is None:
address_cache = self.address_cache
if copy_state:
state = self.__dict__.copy()
else:
state = None
return VM(name, params, root_dir, address_cache, state)
def make_create_command(self, name=None, params=None, root_dir=None):
"""
Generate a libvirt command line. All parameters are optional. If a
parameter is not supplied, the corresponding value stored in the
class attributes is used.
:param name: The name of the object
:param params: A dict containing VM params
:param root_dir: Base directory for relative filenames
:note: The params dict should contain:
mem -- memory size in MBs
cdrom -- ISO filename to use with the qemu -cdrom parameter
extra_params -- a string to append to the qemu command
shell_port -- port of the remote shell daemon on the guest
(SSH, Telnet or the home-made Remote Shell Server)
shell_client -- client program to use for connecting to the
remote shell daemon on the guest (ssh, telnet or nc)
x11_display -- if specified, the DISPLAY environment variable
will be set to this value for the qemu process (useful for
SDL rendering)
images -- a list of image object names, separated by spaces
nics -- a list of NIC object names, separated by spaces
For each image in images:
drive_format -- string to pass as 'if' parameter for this
image (e.g. ide, scsi)
image_snapshot -- if yes, pass 'snapshot=on' to qemu for
this image
image_boot -- if yes, pass 'boot=on' to qemu for this image
In addition, all parameters required by get_image_filename.
For each NIC in nics:
nic_model -- string to pass as 'model' parameter for this
NIC (e.g. e1000)
"""
# helper function for command line option wrappers
def has_option(help_text, option):
return bool(re.search(r"--%s" % option, help_text, re.MULTILINE))
def has_os_variant(os_text, os_variant):
return bool(re.search(r"%s" % os_variant, os_text, re.MULTILINE))
def has_sub_option(option, sub_option):
option_help_text = decode_to_text(process.system_output("%s --%s help" %
(virt_install_binary, option),
verbose=False))
return bool(re.search(r"%s" % sub_option, option_help_text, re.MULTILINE))
# Wrappers for all supported libvirt command line parameters.
# This is meant to allow support for multiple libvirt versions.
# Each of these functions receives the output of 'libvirt --help' as a
# parameter, and should add the requested command line option
# accordingly.
def add_name(help_text, name):
return " --name '%s'" % name
def add_machine_type(help_text, machine_type):
if has_option(help_text, "machine"):
return " --machine %s" % machine_type
else:
return ""
def add_hvm_or_pv(help_text, hvm_or_pv):
if hvm_or_pv == "hvm":
return " --hvm --accelerate"
elif hvm_or_pv == "pv":
return " --paravirt"
else:
logging.warning("Unknown virt type hvm_or_pv, using default.")
return ""
def add_mem(help_text, mem, maxmem=None, hugepage=False):
if has_option(help_text, "memory"):
cmd = " --memory=%s" % mem
if maxmem:
if not has_sub_option('memory', 'maxmemory'):
logging.warning("maxmemory option not supported by "
"virt-install")
else:
cmd += ",maxmemory=%s" % maxmem
if hugepage:
if not has_sub_option('memory', 'hugepages'):
logging.warning("hugepages option not supported by "
"virt-install")
else:
cmd += ",hugepages=yes"
return cmd
else:
return " --ram=%s" % mem
def add_check_cpu(help_text):
if has_option(help_text, "check-cpu"):
return " --check-cpu"
else:
return ""
def add_smp(help_text, smp, maxvcpus=None, sockets=None,
cores=None, threads=None):
cmd = " --vcpu=%s" % smp
if maxvcpus:
cmd += ",maxvcpus=%s" % maxvcpus
if sockets:
cmd += ",sockets=%s" % sockets
if cores:
cmd += ",cores=%s" % cores
if threads:
cmd += ",threads=%s" % threads
return cmd
def add_numa(vcpus, max_mem, numa_nodes):
"""
Method to add Numa node to guest
:param vcpus: vcpus of guest
:param max_mem: max memory of guest
:param numa_nodes: No of guest numa nodes required
:return: appended numa parameter to virt-install cmd
"""
if not has_sub_option('cpu', 'cell'):
return ""
cmd = " --cpu"
cell = "cell%s.cpus=%s,cell%s.id=%s,cell%s.memory=%s"
cells = ""
# we need at least 1 vcpu for 1 numa node
if numa_nodes > vcpus:
numa_nodes = vcpus
params['numa_nodes'] = vcpus
if vcpus > 1:
cpus = vcpus // numa_nodes
cpus_balance = vcpus % numa_nodes
memory = max_mem // numa_nodes
memory_balance = max_mem % numa_nodes
else:
cpus = vcpus
memory = max_mem
cpu_start = 0
for numa in range(numa_nodes):
if numa == numa_nodes - 1 and vcpus > 1:
cpus = cpus + cpus_balance
memory = memory + memory_balance
if cpus == 1:
cpu_str = "%s" % (cpu_start + (cpus - 1))
else:
cpu_str = "%s-%s" % (cpu_start, cpu_start + (cpus - 1))
cpu_start += cpus
cells += "%s," % cell % (numa, cpu_str, numa, numa, numa, memory)
cmd += " %s" % cells
return cmd.strip(",")
def pin_numa(help_text, host_numa_node_list):
"""
Method to pin guest numa with host numa
:param help_text: virt-install help message to check the option
:param host_numa_node_list: list of online host numa nodes
:return: parameter to pin host and guest numa with virt-install
"""
if not has_option(help_text, "numatune"):
return ""
cmd = " --numatune"
numa_pin_mode = params.get("numa_pin_mode", "strict")
# If user gives specific host numa nodes to pin by comma separated
# string pin_to_host_numa_node = "0,1,2", check if the numa
# node is in online numa list and use.
host_numa = str(params.get("pin_to_host_numa_node", ""))
if host_numa:
host_numa_list = host_numa.split(',')
for each_numa in host_numa_list:
if each_numa not in host_numa_node_list:
logging.error("host numa node - %s is not online or "
"doesn't have memory", each_numa)
host_numa_list.remove(each_numa)
if host_numa_list:
host_numa = ','.join(map(str, host_numa_list))
else:
return ""
# If user haven't mention any specific host numa nodes, use
# available online numa nodes
else:
host_numa = ','.join((map(str, host_numa_node_list)))
cmd += " %s,mode=%s" % (host_numa, numa_pin_mode)
return cmd
def pin_hugepage(help_text, hp_size, guest_numa):
"""
Method to pin hugepages to guest numa with virt-install
:param help_text: virt-install help message text
:param hp_size: hugepage size supported
:param guest_numa: guest numa nodes to be pinned with hugepage
:return: cmd parameter to pin hugepage with Numa with virt-install
"""
if not has_option(help_text, "memorybacking"):
return ""
cmd = " --memorybacking"
hp_unit = params.get("hugepage_unit", "KiB")
cmd += " size=%s,nodeset=%s,unit=%s" % (hp_size, guest_numa, hp_unit)
# Instructs hypervisor to disable shared pages (memory merge, KSM) for
# this domain
if params.get("hp_nosharepages", "no") == "yes":
cmd += ",nosharepages=yes"
# memory pages belonging to the domain will be locked in host's memory
# and the host will not be allowed to swap them out
if params.get("hp_locked", "no") == "yes":
cmd += ",locked=yes"
return cmd
def add_cpu_mode(virt_install_cmd, mode='', model='',
match='', vendor=False):
"""
To add cpu mode, model etc... params
:param virt_install_cmd: previous virt install cmd line
:param mode: cpu mode host-passthrough, host-model, custom
:param model: cpu model (coreduo, power8 etc.)
:param match: minimum, exact, strict
:param vendor: cpu vendor
:return: updated virt_install_cmd
"""
cmd = ''
cpu_match = re.match(r".*\s--cpu\s(\S+)\s", virt_install_cmd)
if cpu_match:
cmd = " --cpu %s," % cpu_match.group(1)
else:
cmd = " --cpu "
if mode and has_sub_option('cpu', 'mode'):
cmd += 'mode="%s",' % mode
if model and has_sub_option('cpu', 'model'):
cmd += 'model="%s",' % model
if match and has_sub_option('cpu', 'match'):
cmd += 'match="%s",' % match
if vendor and has_sub_option('cpu', 'vendor'):
cmd += 'vendor="%s",' % libvirt_xml.CapabilityXML().vendor
virt_install_cmd += cmd.strip(',')
return virt_install_cmd
def add_location(help_text, location):
if has_option(help_text, "location"):
return " --location %s" % location
else:
return ""
def add_cdrom(help_text, filename, index=None):
if has_option(help_text, "cdrom"):
return " --cdrom %s" % filename
else:
return ""
def add_pxe(help_text):
if has_option(help_text, "pxe"):
return " --pxe"
else:
return ""
def add_import(help_text):
if has_option(help_text, "import"):
return " --import"
else:
return ""
def add_controller(model=None):
"""
Add controller option for virt-install command line.
:param model: string, controller model.
:return: string, empty or controller option.
"""
if model == 'virtio-scsi':
return " --controller type=scsi,model=virtio-scsi"
else:
return ""
def check_controller(virt_install_cmd_line, controller):
"""
Check for the controller already available in virt-install
command line.
:param virt_install_cmd_line: string, virt-install command line.
:param controller: string, controller model.
:return: True if succeed of False if failed.
"""
found = False
output = re.findall(
r"controller\stype=(\S+),model=(\S+)", virt_install_cmd_line)
for item in output:
if controller in item[1]:
found = True
break
return found
def add_drive(help_text, filename, pool=None, vol=None, device=None,
bus=None, perms=None, size=None, sparse=False,
cache=None, fmt=None):
cmd = " --disk"
if filename:
cmd += " path=%s" % filename
elif pool:
if vol:
cmd += " vol=%s/%s" % (pool, vol)
else:
cmd += " pool=%s" % pool
if device:
cmd += ",device=%s" % device
if bus:
cmd += ",bus=%s" % bus
if perms:
cmd += ",%s" % perms
if size:
cmd += ",size=%s" % size.rstrip("Gg")
if sparse:
cmd += ",sparse=false"
if fmt:
cmd += ",format=%s" % fmt
if cache:
cmd += ",cache=%s" % cache
return cmd
def add_floppy(help_text, filename):
return " --disk path=%s,device=floppy,ro" % filename
def add_vnc(help_text, vnc_port=None):
if vnc_port:
return " --vnc --vncport=%d" % (vnc_port)
else:
return " --vnc"
def add_vnclisten(help_text, vnclisten):
if has_option(help_text, "vnclisten"):
return " --vnclisten=%s" % (vnclisten)
else:
return ""
def add_sdl(help_text):
if has_option(help_text, "sdl"):
return " --sdl"
else:
return ""
def add_nographic(help_text):
return " --nographics"
def add_video(help_text, video_device):
if has_option(help_text, "video"):
return " --video=%s" % (video_device)
else:
return ""
def add_uuid(help_text, uuid):
if has_option(help_text, "uuid"):
return " --uuid %s" % uuid
else:
return ""
def add_os_type(help_text, os_type):
if has_option(help_text, "os-type"):
return " --os-type %s" % os_type
else:
return ""
def add_os_variant(help_text, os_variant):
if has_option(help_text, "os-variant"):
return " --os-variant %s" % os_variant
else:
return ""
def add_pcidevice(help_text, pci_device):
if has_option(help_text, "host-device"):
return " --host-device %s" % pci_device
else:
return ""
def add_soundhw(help_text, sound_device):
if has_option(help_text, "soundhw"):
return " --soundhw %s" % sound_device
else:
return ""
def add_serial(help_text):
if has_option(help_text, "serial"):
return " --serial pty"
else:
return ""
def add_kernel_cmdline(help_text, cmdline):
return " -append %s" % cmdline
def add_connect_uri(help_text, uri):
if uri and has_option(help_text, "connect"):
return " --connect=%s" % uri
else:
return ""
def add_security(help_text, sec_type, sec_label=None, sec_relabel=None):
"""
Return security options for install command.
"""
if has_option(help_text, "security"):
result = " --security"
if sec_type == 'static':
if sec_label is None:
raise ValueError("Seclabel is not setted for static.")
result += " type=static,label=%s" % (sec_label)
elif sec_type == 'dynamic':
result += " type=dynamic"
else:
raise ValueError("Security type %s is not supported."
% sec_type)
if sec_relabel is not None:
result += ",relabel=%s" % sec_relabel
else:
result = ""
return result
def add_nic(help_text, nic_params):
"""
Return additional command line params based on dict-like nic_params
"""
mac = nic_params.get('mac')
nettype = nic_params.get('nettype')
netdst = nic_params.get('netdst')
nic_model = nic_params.get('nic_model')
if nettype:
result = " --network=%s" % nettype
else:
result = ""
if has_option(help_text, "bridge"):
# older libvirt (--network=NATdev --bridge=bridgename
# --mac=mac)
if nettype != 'user':
result += ':%s' % netdst
if mac: # possible to specify --mac w/o --network
result += " --mac=%s" % mac
else:
# newer libvirt (--network=mynet,model=virtio,mac=00:11)
if nettype != 'user':
result += '=%s' % netdst
if nettype and nic_model: # only supported along with nettype
result += ",model=%s" % nic_model
if nettype and mac:
result += ',mac=%s' % mac
elif mac: # possible to specify --mac w/o --network
result += " --mac=%s" % mac
logging.debug("vm.make_create_command.add_nic returning: %s",
result)
return result
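# Illustrative add_nic() results (hypothetical NIC params): given
# {'nettype': 'bridge', 'netdst': 'virbr0', 'nic_model': 'virtio',
#  'mac': '52:54:00:12:34:56'}, a virt-install that still advertises --bridge
# yields " --network=bridge:virbr0 --mac=52:54:00:12:34:56", while newer
# builds yield " --network=bridge=virbr0,model=virtio,mac=52:54:00:12:34:56".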
def add_memballoon(help_text, memballoon_model):
"""
Adding memballoon device to the vm.
:param help_text: string, virt-install help text.
:param memballoon_model: string, memballoon model.
:return: string, empty or memballoon model option.
"""
if has_option(help_text, "memballoon"):
result = " --memballoon model=%s" % memballoon_model
else:
logging.warning("memballoon is not supported")
result = ""
logging.debug("vm.add_memballoon returning: %s", result)
return result
def add_kernel(help_text, cmdline, kernel_path=None, initrd_path=None,
kernel_args=None):
"""
Adding Custom kernel option to boot.
: param help_text: string, virt-install help text
: param cmdline: string, current virt-install cmdline
: param kernel_path: string, custom kernel path.
: param initrd_path: string, custom initrd path.
: param kernel_args: string, custom boot args.
"""
if has_option(help_text, "boot"):
if "--boot" in cmdline:
result = ","
else:
result = " --boot "
if has_sub_option("boot", "kernel") and kernel_path:
result += "kernel=%s," % kernel_path
if has_sub_option("boot", "initrd") and initrd_path:
result += "initrd=%s," % initrd_path
if has_sub_option("boot", "kernel_args") and kernel_args:
result += "kernel_args=\"%s\"," % kernel_args
else:
result = ""
logging.warning("boot option is not supported")
return result.rstrip(',')
# End of command line option wrappers
if name is None:
name = self.name
if params is None:
params = self.params
if root_dir is None:
root_dir = self.root_dir
# Clone this VM using the new params
vm = self.clone(name, params, root_dir, copy_state=True)
virt_install_binary = utils_misc.get_path(
root_dir,
params.get("virt_install_binary",
"virt-install"))
help_text = decode_to_text(process.system_output("%s --help" % virt_install_binary,
verbose=False))
try:
os_text = decode_to_text(process.system_output("osinfo-query os --fields short-id", verbose=False))
except process.CmdError:
os_text = decode_to_text(process.system_output("%s --os-variant list" %
virt_install_binary,
verbose=False))
# Find all supported machine types, so we can rule out an unsupported
# machine type option passed in the configuration.
hvm_or_pv = params.get("hvm_or_pv", "hvm")
# default to 'uname -m' output
arch_name = params.get("vm_arch_name", platform.machine())
capabs = libvirt_xml.CapabilityXML()
try:
support_machine_type = capabs.guest_capabilities[
hvm_or_pv][arch_name]['machine']
except KeyError as detail:
if detail.args[0] == hvm_or_pv:
raise KeyError("No libvirt support for %s virtualization, "
"does system hardware + software support it?"
% hvm_or_pv)
elif detail.args[0] == arch_name:
raise KeyError("No libvirt support for %s virtualization of "
"%s, does system hardware + software support "
"it?" % (hvm_or_pv, arch_name))
raise
logging.debug("Machine types supported for %s/%s: %s",
hvm_or_pv, arch_name, support_machine_type)
# Start constructing the qemu command
virt_install_cmd = ""
# Set the X11 display parameter if requested
if params.get("x11_display"):
virt_install_cmd += "DISPLAY=%s " % params.get("x11_display")
# Add the qemu binary
virt_install_cmd += virt_install_binary
# set connect uri
virt_install_cmd += add_connect_uri(help_text, self.connect_uri)
# hvm or pv specified by libvirt switch (pv used by Xen only)
if hvm_or_pv:
virt_install_cmd += add_hvm_or_pv(help_text, hvm_or_pv)
# Add the VM's name
virt_install_cmd += add_name(help_text, name)
machine_type = params.get("machine_type")
if machine_type:
if machine_type in support_machine_type:
virt_install_cmd += add_machine_type(help_text, machine_type)
else:
raise exceptions.TestSkipError("Unsupported machine type %s." %
(machine_type))
mem = params.get("mem")
maxmemory = params.get("maxmemory", None)
# hugepage setup in host will be taken care in env_process
hugepage = params.get("hugepage", "no") == "yes"
if mem:
virt_install_cmd += add_mem(help_text, mem, maxmemory, hugepage)
# TODO: should we do the check before we call ? negative case ?
check_cpu = params.get("use_check_cpu")
if check_cpu:
virt_install_cmd += add_check_cpu(help_text)
smp = params.get("smp")
vcpu_max_cpus = params.get("vcpu_maxcpus")
vcpu_sockets = params.get("vcpu_sockets")
vcpu_cores = params.get("vcpu_cores")
vcpu_threads = params.get("vcpu_threads")
if smp:
virt_install_cmd += add_smp(help_text, smp, vcpu_max_cpus,
vcpu_sockets, vcpu_cores, vcpu_threads)
numa = params.get("numa", "no") == "yes"
if numa:
# Number of numa nodes required can be set in param
numa_nodes = int(params.get("numa_nodes", 2))
numa_vcpus = int(smp)
# virt-install takes --memory in MiB but --cpu cell adds numa
# memory in KiB by default
numa_memory = int(mem) * 1024
if vcpu_max_cpus:
numa_vcpus = int(vcpu_max_cpus)
if maxmemory:
numa_memory = int(maxmemory)
virt_install_cmd += add_numa(numa_vcpus, numa_memory, numa_nodes)
if params.get("numa_pin", "no") == "yes":
# Get online host numa nodes
host_numa_node = utils_misc.NumaInfo()
host_numa_node_list = host_numa_node.online_nodes
# check if memory is available in host numa node
for each_numa in host_numa_node_list:
if hugepage:
hp = test_setup.HugePageConfig(params)
free_hp = host_numa_node.read_from_node_meminfo(each_numa,
"HugePages_Free")
free_mem = int(free_hp) * int(hp.get_hugepage_size())
else:
free_mem = int(host_numa_node.read_from_node_meminfo(each_numa,
'MemFree'))
# Numa might be online but if it doesn't have free memory,
# skip it
if free_mem == 0:
logging.debug("Host numa node: %s doesn't have memory",
each_numa)
host_numa_node_list.remove(each_numa)
if not host_numa_node_list:
logging.error("Host Numa nodes are not online or doesn't "
"have memory to pin")
else:
virt_install_cmd += pin_numa(help_text, host_numa_node_list)
if params.get("hugepage_pin", "no") == "yes":
if numa and hugepage:
# get host hugepage size
hp_obj = test_setup.HugePageConfig(params)
hp_size = hp_obj.get_hugepage_size()
# specify numa nodes to be backed by HP by comma separated
# string, hugepage_pinned_numa = "0-2,4" to back guest numa
# nodes 0 to 2 and 4.
guest_numa = str(params.get("hugepage_pinned_numa"))
if guest_numa == 'None':
# if user didn't mention hugepage_pinned_numa use
# numa_nodes to back all the numa nodes.
guest_numa = int(params.get("numa_nodes", 2))
guest_numa = ','.join(map(str, list(range(guest_numa))))
virt_install_cmd += pin_hugepage(help_text, hp_size, guest_numa)
else:
logging.error("Can't pin hugepage without hugepage enabled"
"and Numa enabled")
cpu_mode = params.get("virt_cpu_mode", '')
if cpu_mode:
virt_install_cmd = add_cpu_mode(virt_install_cmd,
mode=cpu_mode,
model=params.get('virt_cpu_model', ''),
match=params.get('virt_cpu_match', ''),
vendor=params.get('virt_cpu_vendor', False))
# TODO: directory location for vmlinuz/kernel for cdrom install ?
location = None
if params.get("medium") == 'url':
location = params.get('url')
elif params.get("medium") == 'kernel_initrd':
# directory location of kernel/initrd pair (directory layout must
# be in format libvirt will recognize)
location = params.get("image_dir")
elif params.get("medium") == 'nfs':
location = "nfs:%s:%s" % (params.get("nfs_server"),
params.get("nfs_dir"))
elif params.get("medium") == 'cdrom':
if params.get("use_libvirt_cdrom_switch") == 'yes':
virt_install_cmd += add_cdrom(
help_text, params.get("cdrom_cd1"))
elif params.get("unattended_delivery_method") == "integrated":
cdrom_path = os.path.join(data_dir.get_data_dir(),
params.get("cdrom_unattended"))
virt_install_cmd += add_cdrom(help_text, cdrom_path)
else:
location = data_dir.get_data_dir()
kernel_dir = os.path.dirname(params.get("kernel"))
kernel_parent_dir = os.path.dirname(kernel_dir)
pxeboot_link = os.path.join(kernel_parent_dir, "pxeboot")
if os.path.islink(pxeboot_link):
os.unlink(pxeboot_link)
if os.path.isdir(pxeboot_link):
logging.info("Removed old %s leftover directory",
pxeboot_link)
shutil.rmtree(pxeboot_link)
os.symlink(kernel_dir, pxeboot_link)
elif params.get("medium") == "import":
virt_install_cmd += add_import(help_text)
if location:
virt_install_cmd += add_location(help_text, location)
# Disable display when vga is disabled (used mainly by machines.cfg)
if params.get("vga") == "none":
virt_install_cmd += add_nographic(help_text)
elif params.get("display") == "vnc":
if params.get("vnc_autoport") == "yes":
vm.vnc_autoport = True
else:
vm.vnc_autoport = False
if not vm.vnc_autoport and params.get("vnc_port"):
vm.vnc_port = int(params.get("vnc_port"))
virt_install_cmd += add_vnc(help_text, vm.vnc_port)
if params.get("vnclisten"):
vm.vnclisten = params.get("vnclisten")
virt_install_cmd += add_vnclisten(help_text, vm.vnclisten)
elif params.get("display") == "sdl":
virt_install_cmd += add_sdl(help_text)
elif params.get("display") == "nographic":
virt_install_cmd += add_nographic(help_text)
video_device = params.get("video_device")
if video_device:
virt_install_cmd += add_video(help_text, video_device)
sound_device = params.get("sound_device")
if sound_device:
virt_install_cmd += add_soundhw(help_text, sound_device)
# if none is given a random UUID will be generated by libvirt
if params.get("uuid"):
virt_install_cmd += add_uuid(help_text, params.get("uuid"))
# selectable OS type
if params.get("use_os_type") == "yes":
virt_install_cmd += add_os_type(help_text, params.get("os_type"))
# selectable OS variant
if params.get("use_os_variant") == "yes":
if not has_os_variant(os_text, params.get("os_variant")):
raise exceptions.TestSkipError("Unsupported OS variant: %s.\n"
"Supported variants: %s" %
(params.get('os_variant'),
os_text))
virt_install_cmd += add_os_variant(
help_text, params.get("os_variant"))
# Add serial console
virt_install_cmd += add_serial(help_text)
# Add memballoon device
memballoon_model = params.get("memballoon_model")
if memballoon_model:
virt_install_cmd += add_memballoon(help_text, memballoon_model)
# If the PCI assignment step went OK, add each one of the PCI assigned
# devices to the command line.
if self.pci_devices:
for pci_id in self.pci_devices:
virt_install_cmd += add_pcidevice(help_text, pci_id)
for image_name in params.objects("images"):
image_params = params.object_params(image_name)
base_dir = image_params.get("images_base_dir",
data_dir.get_data_dir())
filename = storage.get_image_filename(image_params,
base_dir)
if image_params.get("use_storage_pool") == "yes":
filename = None
virt_install_cmd += add_drive(help_text,
filename,
image_params.get("image_pool"),
image_params.get("image_vol"),
image_params.get("image_device"),
image_params.get("image_bus"),
image_params.get("image_perms"),
image_params.get("image_size"),
image_params.get("drive_sparse"),
image_params.get("drive_cache"),
image_params.get("image_format"))
if image_params.get("boot_drive") == "no":
continue
if filename:
libvirt_controller = image_params.get(
"libvirt_controller", None)
_drive_format = image_params.get("drive_format")
if libvirt_controller:
if not check_controller(virt_install_cmd, libvirt_controller):
virt_install_cmd += add_controller(libvirt_controller)
# this will reset the scsi-hd to scsi as we are adding controller
# to mention the drive format
if 'scsi' in _drive_format:
_drive_format = "scsi"
virt_install_cmd += add_drive(help_text,
filename,
None,
None,
None,
_drive_format,
None,
image_params.get("image_size"),
image_params.get("drive_sparse"),
image_params.get("drive_cache"),
image_params.get("image_format"))
unattended_integrated = (params.get('unattended_delivery_method') !=
'integrated')
xen_pv = self.driver_type == 'xen' and params.get('hvm_or_pv') == 'pv'
if unattended_integrated and not xen_pv:
for cdrom in params.objects("cdroms"):
cdrom_params = params.object_params(cdrom)
iso = cdrom_params.get("cdrom")
if params.get("use_libvirt_cdrom_switch") == 'yes':
# we don't want to skip the winutils iso
if not cdrom == 'winutils':
logging.debug(
"Using --cdrom instead of --disk for install")
logging.debug("Skipping CDROM:%s:%s", cdrom, iso)
continue
if params.get("medium") == 'cdrom_no_kernel_initrd':
if iso == params.get("cdrom_cd1"):
logging.debug("Using cdrom or url for install")
logging.debug("Skipping CDROM: %s", iso)
continue
if iso:
iso_path = utils_misc.get_path(root_dir, iso)
iso_image_pool = image_params.get("iso_image_pool")
iso_image_vol = image_params.get("iso_image_vol")
virt_install_cmd += add_drive(help_text,
iso_path,
iso_image_pool,
iso_image_vol,
'cdrom',
None,
None,
None,
None,
None,
None)
# We may want to add {floppy_opts} parameter for -fda
# {fat:floppy:}/path/. However vvfat is not usually recommended.
# Only support to add the main floppy if you want to add the second
# one please modify this part.
floppy = params.get("floppy_name")
if floppy:
floppy = utils_misc.get_path(data_dir.get_data_dir(), floppy)
virt_install_cmd += add_drive(help_text, floppy,
None,
None,
'floppy',
None,
None,
None,
None,
None,
None)
# setup networking parameters
for nic in vm.virtnet:
# make_create_command can be called w/o vm.create()
nic = vm.add_nic(**dict(nic))
logging.debug("make_create_command() setting up command for"
" nic: %s" % str(nic))
virt_install_cmd += add_nic(help_text, nic)
if params.get("use_no_reboot") == "yes":
virt_install_cmd += " --noreboot"
if params.get("use_autostart") == "yes":
virt_install_cmd += " --autostart"
if params.get("virt_install_debug") == "yes":
virt_install_cmd += " --debug"
emulator_path = params.get("emulator_path", None)
if emulator_path:
if not has_sub_option('boot', 'emulator'):
logging.warning("emulator option not supported by virt-install")
else:
virt_install_cmd += " --boot emulator=%s" % emulator_path
kernel = params.get("kernel", None)
initrd = params.get("initrd", None)
kernel_args = params.get("kernel_args", None)
if (kernel or initrd) and kernel_args:
virt_install_cmd += add_kernel(help_text, virt_install_cmd, kernel,
initrd, kernel_args)
# bz still open, not fully functional yet
if params.get("use_virt_install_wait") == "yes":
virt_install_cmd += (" --wait %s" %
params.get("virt_install_wait_time"))
kernel_params = params.get("kernel_params")
if kernel_params:
virt_install_cmd += " --extra-args '%s'" % kernel_params
virt_install_cmd += " --noautoconsole"
sec_type = params.get("sec_type", None)
if sec_type:
sec_label = params.get("sec_label", None)
sec_relabel = params.get("sec_relabel", None)
virt_install_cmd += add_security(help_text, sec_type=sec_type,
sec_label=sec_label,
sec_relabel=sec_relabel)
virtinstall_extra_args = params.get("virtinstall_extra_args", "")
if virtinstall_extra_args:
virt_install_cmd += " %s" % virtinstall_extra_args
return virt_install_cmd
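# Illustrative sketch (hypothetical parameters and paths): for a minimal
# guest with mem=1024, smp=2, one qcow2 image and a bridge NIC, the command
# assembled above typically looks along the lines of
#
#   virt-install --connect=qemu:///system --hvm --accelerate --name 'vm1'
#       --memory=1024 --vcpu=2 --import
#       --disk path=/path/to/jeos.qcow2,size=10,format=qcow2
#       --network=bridge=virbr0,mac=52:54:00:12:34:56 --vnc --noautoconsole
#
# with every option guarded by a has_option()/has_sub_option() probe of the
# locally installed virt-install binary.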
def get_serial_console_filename(self, name):
"""
Return the serial console filename.
:param name: The serial port name.
"""
return "serial-%s-%s-%s.log" % (name, self.name,
utils_misc.generate_random_string(4))
def get_serial_console_filenames(self):
"""
Return a list of all serial console filenames
(as specified in the VM's params).
"""
return [self.get_serial_console_filename(_) for _ in
self.params.objects("serials")]
def _create_serial_console(self):
"""
Establish a session with the serial console.
The libvirt version uses virsh console to manage it.
"""
if not self.serial_ports:
for serial in self.params.objects("serials"):
self.serial_ports.append(serial)
if self.serial_console is None or self.serial_console.closed:
try:
cmd = 'virsh'
if self.connect_uri:
cmd += ' -c %s' % self.connect_uri
cmd += (" console %s %s" % (self.name, self.serial_ports[0]))
except IndexError:
raise virt_vm.VMConfigMissingError(self.name, "serial")
output_func = utils_misc.log_line # Because qemu-kvm uses this
# Because qemu-kvm hard-codes this
output_filename = self.get_serial_console_filename(self.serial_ports[0])
output_params = (output_filename,)
prompt = self.params.get("shell_prompt", "[\#\$]")
self.serial_console = aexpect.ShellSession(command=cmd, auto_close=False,
output_func=output_func,
output_params=output_params,
prompt=prompt)
# Cause serial_console.close() to close open log file
self.serial_console.set_log_file(output_filename)
self.serial_console_log = os.path.join(utils_misc.get_log_file_dir(),
output_filename)
def set_root_serial_console(self, device, remove=False):
"""
Allow or ban root to login through serial console.
:param device: device to set root login
:param remove: if True, remove the device from /etc/securetty instead of adding it
"""
try:
session = self.login()
except (remote.LoginError, virt_vm.VMError) as e:
logging.debug(e)
else:
try:
securetty_output = session.cmd_output("cat /etc/securetty")
devices = str(securetty_output).strip().splitlines()
if device not in devices:
if not remove:
session.sendline("echo %s >> /etc/securetty" % device)
else:
if remove:
session.sendline("sed -i -e /%s/d /etc/securetty"
% device)
logging.debug("Set root login for %s successfully.", device)
return True
finally:
session.close()
logging.debug("Set root login for %s failed.", device)
return False
def set_kernel_console(self, device, speed=None, remove=False):
"""
Set kernel parameter for given console device.
:param device: a console device
:param speed: speed of serial console
:param remove: do remove operation
"""
try:
session = self.login()
except (remote.LoginError, virt_vm.VMError) as e:
logging.debug(e)
else:
try:
grub = "/boot/grub/grub.conf"
if not session.cmd_status("ls /boot/grub2/grub.cfg"):
grub = "/boot/grub2/grub.cfg"
kernel_params = "console=%s" % device
if speed is not None:
kernel_params += ",%s" % speed
output = session.cmd_output("cat %s" % grub)
if not re.search("console=%s" % device, output):
if not remove:
session.sendline("sed -i -e \'s/vmlinuz-.*/& %s/g\'"
" %s; sync" % (kernel_params, grub))
else:
if remove:
session.sendline("sed -i -e \'s/console=%s\w*\s//g\'"
" %s; sync" % (device, grub))
logging.debug("Set kernel params for %s successfully.", device)
return True
finally:
session.close()
logging.debug("Set kernel params for %s failed.", device)
return False
def set_kernel_param(self, parameter, value=None, remove=False):
"""
Set a specific kernel parameter.
:param parameter: A kernel parameter to set.
:param value: The value of the parameter to be set.
:param remove: Remove the parameter if True.
:return: True if succeed of False if failed.
"""
if self.is_dead():
logging.error("Can't set kernel param on a dead VM.")
return False
session = self.wait_for_login()
try:
grub_path = utils_misc.get_bootloader_cfg(session)
if not grub_path:
return False
grub_text = session.cmd_output("cat %s" % grub_path)
kernel_lines = [l.strip() for l in grub_text.splitlines()
if re.match(r"\s*(linux|kernel).*", l)]
if not kernel_lines:
logging.error("Can't find any kernel lines in grub "
"file %s:\n%s" % (grub_path, grub_text))
return False
for line in kernel_lines:
line = line.replace('\t', r'\t')
if remove:
new_string = ""
else:
if value is None:
new_string = parameter
else:
new_string = "%s=%s" % (parameter, value)
patts = [
"\s+(%s=\S*)(\s|$)" % parameter,
"\s+(%s)(\s|$)" % parameter,
]
old_string = ""
for patt in patts:
res = re.search(patt, line)
if res:
old_string = res.group(1)
break
if old_string:
new_line = line.replace(old_string, new_string)
else:
new_line = " ".join((line, new_string))
line_patt = "\s*".join(line.split())
logging.debug("Substituting grub line '%s' to '%s'." %
(line, new_line))
stat_sed, output = session.cmd_status_output(
"sed -i --follow-symlinks -e \"s@%s@%s@g\" %s" %
(line_patt, new_line, grub_path))
if stat_sed:
logging.error("Failed to substitute grub file:\n%s" %
output)
return False
if remove:
logging.debug("Remove kernel params %s successfully.",
parameter)
else:
logging.debug("Set kernel params %s to %s successfully.",
parameter, value)
return True
finally:
session.close()
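# Illustrative usage (hypothetical values): these helpers rewrite the kernel
# line(s) of the guest's grub configuration over a login session, e.g.
#
#   vm.set_kernel_param('console', 'ttyS0,115200')  # add/replace console=ttyS0,115200
#   vm.set_kernel_param('quiet', remove=True)       # drop a bare flag
#
# A guest reboot is still required before the new command line takes effect.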
def set_boot_kernel(self, index, debug_kernel=False):
"""
Set default kernel to the one at the given index or to the debug kernel
:param index: index of kernel to set to default
:param debug_kernel: True if set debug kernel to default
:return: default kernel
"""
if self.is_dead():
logging.error("Can't set kernel param on a dead VM.")
return False
session = self.wait_for_login()
try:
grub_path = utils_misc.get_bootloader_cfg(session)
if not grub_path:
return
if "grub2" in grub_path:
grub = 2
output = session.cmd("cat %s |grep menuentry" % grub_path)
kernel_list = re.findall("menuentry '.*?'", output)
else:
grub = 1
output = session.cmd("cat %s |grep initramfs" % grub_path)
kernel_list = re.findall("-.*", output)
if index >= len(kernel_list):
logging.error("Index out of kernel list")
return
logging.debug("kernel list of vm:")
logging.debug(kernel_list)
if debug_kernel:
index = -1
logging.info("Setting debug kernel as default")
for i in range(len(kernel_list)):
if "debug" in kernel_list[i] and 'rescue' not in kernel_list[i].lower():
index = i
break
if index == -1:
logging.error("No debug kernel in grub file!")
return
if grub == 1:
cmd_set_grub = "sed -i 's/default=./default=%d/' " % index
cmd_set_grub += grub_path
boot_kernel = kernel_list[index].strip("-")
else:
boot_kernel = kernel_list[index].split("'")[1].strip("'")
cmd_set_grub = 'grub2-set-default %d' % index
session.cmd(cmd_set_grub)
return boot_kernel
finally:
session.close()
def has_swap(self):
"""
Check if there is any active swap partition/file.
:return : True if swap is on or False otherwise.
"""
if self.is_dead():
logging.error("Can't check swap on a dead VM.")
return False
session = self.wait_for_login()
try:
cmd = "swapon -s"
output = session.cmd_output(cmd)
if output.strip():
return True
return False
finally:
session.close()
def create_swap_partition(self, swap_path=None):
"""
Make a swap partition and active it.
A cleanup_swap() should be called after use to clean up
the environment changed.
:param swap_path: Swap image path.
"""
if self.is_dead():
logging.error("Can't create swap on a dead VM.")
return False
if not swap_path:
swap_path = os.path.join(data_dir.get_tmp_dir(), "swap_image")
swap_size = self.get_used_mem()
process.run("qemu-img create %s %s" % (swap_path, swap_size * 1024))
self.created_swap_path = swap_path
device = self.attach_disk(swap_path, extra="--persistent")
self.created_swap_device = device
session = self.wait_for_login()
try:
dev_path = "/dev/" + device
session.cmd_status("mkswap %s" % dev_path)
session.cmd_status("swapon %s" % dev_path)
self.set_kernel_param("resume", dev_path)
return True
finally:
session.close()
logging.error("Failed to create a swap partition.")
return False
def create_swap_file(self, swapfile='/swapfile'):
"""
Make a swap file and active it through a session.
A cleanup_swap() should be called after use to clean up
the environment changed.
:param swapfile: Swap file path in VM to be created.
"""
if self.is_dead():
logging.error("Can't create swap on a dead VM.")
return False
session = self.wait_for_login()
try:
# Get memory size.
swap_size = self.get_used_mem() // 1024
# Create, change permission, and make a swap file.
cmd = ("dd if=/dev/zero of={1} bs=1M count={0} && "
"chmod 600 {1} && "
"mkswap {1}".format(swap_size, swapfile))
stat_create, output = session.cmd_status_output(cmd)
if stat_create:
logging.error("Fail to create swap file in guest."
"\n%s" % output)
return False
self.created_swap_file = swapfile
# Get physical swap file offset for kernel param resume_offset.
cmd = "filefrag -v %s" % swapfile
output = session.cmd_output(cmd)
# For compatibility of different version of filefrag
# Sample output of 'filefrag -v /swapfile'
# On newer version:
# Filesystem type is: 58465342
# File size of /swapfile is 1048576000 (256000 blocks of 4096 bytes)
# ext: logical_offset: physical_offset: length: expected: flags:
# 0: 0.. 65519: 395320.. 460839: 65520:
# ...
# On older version:
# Filesystem type is: ef53
# File size of /swapfile is 1048576000 (256000 blocks, blocksize 4096)
# ext logical physical expected length flags
# 0 0 2465792 32768
# ...
offset_line = output.splitlines()[3]
if '..' in offset_line:
offset = offset_line.split()[3].rstrip('..')
else:
offset = offset_line.split()[2]
# Get physical swap file device for kernel param resume.
cmd = "df %s" % swapfile
output = session.cmd_output(cmd)
# Sample output of 'df /swapfile':
# Filesystem 1K-blocks Used Available Use% Mounted on
# /dev/vdb 52403200 15513848 36889352 30% /
device = output.splitlines()[1].split()[0]
# Set kernel parameters.
self.set_kernel_param("resume", device)
self.set_kernel_param("resume_offset", offset)
finally:
session.close()
self.reboot()
session = self.wait_for_login()
try:
# Activate a swap file.
cmd = "swapon %s" % swapfile
stat_swapon, output = session.cmd_status_output(cmd)
if stat_swapon:
logging.error("Failed to activate swap file in guest."
"\n%s" % output)
return False
finally:
session.close()
if self.has_swap():
logging.debug("Successfully created swapfile %s." % swapfile)
return True
else:
logging.error("Failed to create swap file.")
return False
def cleanup_swap(self):
"""
Cleanup environment changed by create_swap_partition() or
create_swap_file().
"""
if self.is_dead():
logging.error("Can't cleanup swap on a dead VM.")
return False
# Remove kernel parameters.
self.set_kernel_param("resume", remove=True)
self.set_kernel_param("resume_offset", remove=True)
# Deactivate swap partition/file.
session = self.wait_for_login()
try:
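# Turn off every active swap area in the guest before removing the backing file.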
session.cmd_status("swapoff -a")
if "created_swap_file" in dir(self):
session.cmd_status("rm -f %s" % self.created_swap_file)
del self.created_swap_file
finally:
session.close()
# Cold unplug attached swap disk
if self.shutdown():
if "created_swap_device" in dir(self):
self.detach_disk(
self.created_swap_device, extra="--persistent")
del self.created_swap_device
if "created_swap_path" in dir(self):
os.remove(self.created_swap_path)
del self.created_swap_path
def set_console_getty(self, device, getty="mgetty", remove=False):
"""
Set getty for given console device.
:param device: a console device
:param getty: getty type: agetty, mgetty and so on.
:param remove: do remove operation
"""
try:
session = self.login()
except (remote.LoginError, virt_vm.VMError) as e:
logging.debug(e)
else:
try:
# Only configure RHEL5 and below
regex = "gettys are handled by"
# As of RHEL7 systemd message is displayed
regex += "|inittab is no longer used when using systemd"
output = session.cmd_output("cat /etc/inittab")
if re.search(regex, output):
logging.debug("Skip setting inittab for %s", device)
return True
getty_str = "co:2345:respawn:/sbin/%s %s" % (getty, device)
matched_str = "respawn:/sbin/*getty %s" % device
if not re.search(matched_str, output):
if not remove:
session.sendline("echo %s >> /etc/inittab" % getty_str)
else:
if remove:
session.sendline("sed -i -e /%s/d "
"/etc/inittab" % matched_str)
logging.debug("Set inittab for %s successfully.", device)
return True
finally:
session.close()
logging.debug("Set inittab for %s failed.", device)
return False
def cleanup_serial_console(self):
"""
Close serial console and associated log file
"""
if self.serial_console is not None:
if self.is_lxc():
self.serial_console.sendline("^]")
self.serial_console.close()
self.serial_console = None
self.serial_console_log = None
self.console_manager.set_console(None)
if hasattr(self, "migration_file"):
try:
os.unlink(self.migration_file)
except OSError:
pass
def wait_for_login(self, nic_index=0, timeout=None,
internal_timeout=None,
serial=False, restart_network=False,
username=None, password=None):
"""
Override the wait_for_login method of virt_vm to support other
guests in libvirt.
If connect_uri is lxc related, call wait_for_serial_login()
directly, without attempting to log in via the network.
For any other connect_uri, call virt_vm.wait_for_login().
"""
# Set the default value of parameters if user did not use it.
if not timeout:
timeout = super(VM, self).LOGIN_WAIT_TIMEOUT
if not internal_timeout:
internal_timeout = super(VM, self).LOGIN_TIMEOUT
if self.is_lxc():
self.cleanup_serial_console()
self.create_serial_console()
return self.wait_for_serial_login(timeout, internal_timeout,
restart_network,
username, password)
return super(VM, self).wait_for_login(nic_index, timeout,
internal_timeout,
serial, restart_network,
username, password)
@error_context.context_aware
def create(self, name=None, params=None, root_dir=None, timeout=5.0,
migration_mode=None, mac_source=None, autoconsole=True):
"""
Start the VM by running a qemu command.
All parameters are optional. If name, params or root_dir are not
supplied, the respective values stored as class attributes are used.
:param name: The name of the object
:param params: A dict containing VM params
:param root_dir: Base directory for relative filenames
:param migration_mode: If supplied, start VM for incoming migration
using this protocol (either 'tcp', 'unix' or 'exec')
:param migration_exec_cmd: Command to embed in '-incoming "exec: ..."'
(e.g. 'gzip -c -d filename') if migration_mode is 'exec'
:param mac_source: A VM object from which to copy MAC addresses. If not
specified, new addresses will be generated.
:raise VMCreateError: If qemu terminates unexpectedly
:raise VMKVMInitError: If KVM initialization fails
:raise VMHugePageError: If hugepage initialization fails
:raise VMImageMissingError: If a CD image is missing
:raise VMHashMismatchError: If a CD image hash doesn't match the
expected hash
:raise VMBadPATypeError: If an unsupported PCI assignment type is
requested
:raise VMPAError: If no PCI assignable devices could be assigned
"""
error_context.context("creating '%s'" % self.name)
self.destroy(free_mac_addresses=False)
if name is not None:
self.name = name
if params is not None:
self.params = params
if root_dir is not None:
self.root_dir = root_dir
name = self.name
params = self.params
root_dir = self.root_dir
# Verify the md5sum of the ISO images
for cdrom in params.objects("cdroms"):
if params.get("medium") == "import":
break
cdrom_params = params.object_params(cdrom)
iso = cdrom_params.get("cdrom")
xen_pv = (self.driver_type == 'xen' and
params.get('hvm_or_pv') == 'pv')
iso_is_ks = os.path.basename(iso) == 'ks.iso'
if xen_pv and iso_is_ks:
continue
if iso:
iso = utils_misc.get_path(data_dir.get_data_dir(), iso)
if not os.path.exists(iso):
raise virt_vm.VMImageMissingError(iso)
compare = False
if cdrom_params.get("skip_hash", "no") == "yes":
logging.debug("Skipping hash comparison")
elif cdrom_params.get("md5sum_1m"):
logging.debug("Comparing expected MD5 sum with MD5 sum of "
"first MB of ISO file...")
actual_hash = crypto.hash_file(
iso, 1048576, algorithm="md5")
expected_hash = cdrom_params.get("md5sum_1m")
compare = True
elif cdrom_params.get("md5sum"):
logging.debug("Comparing expected MD5 sum with MD5 sum of "
"ISO file...")
actual_hash = crypto.hash_file(iso, algorithm="md5")
expected_hash = cdrom_params.get("md5sum")
compare = True
elif cdrom_params.get("sha1sum"):
logging.debug("Comparing expected SHA1 sum with SHA1 sum "
"of ISO file...")
actual_hash = crypto.hash_file(iso, algorithm="sha1")
expected_hash = cdrom_params.get("sha1sum")
compare = True
if compare:
if actual_hash == expected_hash:
logging.debug("Hashes match")
else:
raise virt_vm.VMHashMismatchError(actual_hash,
expected_hash)
# Make sure the following code is not executed by more than one thread
# at the same time
lockfilename = os.path.join(data_dir.get_tmp_dir(),
"libvirt-autotest-vm-create.lock")
lockfile = open(lockfilename, "w+")
fcntl.lockf(lockfile, fcntl.LOCK_EX)
try:
# Handle port redirections
redir_names = params.objects("redirs")
host_ports = utils_misc.find_free_ports(
5000, 6000, len(redir_names))
self.redirs = {}
for i in range(len(redir_names)):
redir_params = params.object_params(redir_names[i])
guest_port = int(redir_params.get("guest_port"))
self.redirs[guest_port] = host_ports[i]
# Find available PCI devices
self.pci_devices = []
for device in params.objects("pci_devices"):
self.pci_devices.append(device)
# Find available VNC port, if needed
if params.get("display") == "vnc":
if params.get("vnc_autoport") == "yes":
self.vnc_port = None
self.vnc_autoport = True
else:
self.vnc_port = utils_misc.find_free_port(5900, 6100)
self.vnc_autoport = False
# Find available spice port, if needed
if params.get("spice"):
self.spice_port = utils_misc.find_free_port(8000, 8100)
# Find random UUID if specified 'uuid = random' in config file
if params.get("uuid") == "random":
f = open("/proc/sys/kernel/random/uuid")
self.uuid = f.read().strip()
f.close()
# Generate or copy MAC addresses for all NICs
for nic in self.virtnet:
nic_params = dict(nic)
if mac_source is not None:
# Will raise exception if source doesn't
# have corresponding nic
logging.debug("Copying mac for nic %s from VM %s",
nic.nic_name, mac_source.name)
nic_params['mac'] = mac_source.get_mac_address(
nic.nic_name)
# make_create_command() calls vm.add_nic (i.e. on a copy)
nic = self.add_nic(**nic_params)
logging.debug('VM.create activating nic %s' % nic)
self.activate_nic(nic.nic_name)
# Make qemu command
install_command = self.make_create_command()
logging.info("Running libvirt command (reformatted):")
for item in install_command.replace(" -", " \n -").splitlines():
logging.info("%s", item)
try:
process.run(install_command, verbose=True, shell=True)
except process.CmdError as details:
stderr = results_stderr_52lts(details.result).strip()
# This is a common newcomer mistake, be more helpful...
if stderr.count('IDE CDROM must use'):
testname = params.get('name', "")
if testname.count('unattended_install.cdrom'):
if not testname.count('http_ks'):
e_msg = ("Install command "
"failed:\n%s \n\nNote: "
"Older versions of "
"libvirt won't work "
"properly with kickstart "
"on cdrom install. "
"Try using the "
"unattended_install.cdrom.http_ks method "
"instead." % details.result)
raise exceptions.TestSkipError(e_msg)
if stderr.count('failed to launch bridge helper'):
if utils_selinux.is_enforcing():
raise exceptions.TestSkipError("SELinux is enabled "
"and preventing the "
"bridge helper from "
"accessing the bridge. "
"Consider running as "
"root or placing "
"SELinux into "
"permissive mode.")
# some other problem happened, raise normally
raise
# Wait for the domain to be created
utils_misc.wait_for(func=self.is_alive, timeout=60,
text=("waiting for domain %s to start" %
self.name))
result = virsh.domuuid(self.name, uri=self.connect_uri)
self.uuid = results_stdout_52lts(result).strip()
# Create isa serial ports.
self.create_serial_console()
finally:
fcntl.lockf(lockfile, fcntl.LOCK_UN)
lockfile.close()
def uptime(self, connect_uri=None):
"""
Get uptime of the vm instance.
:param connect_uri: Libvirt connect uri of vm
:return: uptime of the vm on success, None on failure
"""
if connect_uri:
self.connect_uri = connect_uri
session = self.wait_for_serial_login()
else:
session = self.wait_for_login()
return utils_misc.get_uptime(session)
def migrate(self, dest_uri="", option="--live --timeout 60", extra="",
ignore_status=False, debug=False, virsh_opt=""):
"""
Migrate a VM to a remote host.
:param dest_uri: Destination libvirt URI
:param option: Migration options before <domain> <desturi>
:param extra: Migration options after <domain> <desturi>
:return: True if command succeeded
"""
logging.info("Migrating VM %s from %s to %s" %
(self.name, self.connect_uri, dest_uri))
result = virsh.migrate(self.name, dest_uri, option,
extra, uri=self.connect_uri,
ignore_status=ignore_status,
debug=debug, virsh_opt=virsh_opt)
# Close down serial_console logging process
self.cleanup_serial_console()
# On successful migration, point to guests new hypervisor.
# Since dest_uri could be None, checking it is necessary.
if result.exit_status == 0 and dest_uri:
self.connect_uri = dest_uri
self.create_serial_console()
return result
def attach_disk(self, source, target=None, prefix="vd", extra="",
ignore_status=False, debug=False):
"""
Attach a disk to VM and return the target device name.
:param source: source of disk device
:param target: target of disk device, None for automatic assignment.
:param prefix: disk device prefix.
:param extra: additional arguments to command
:return: target device name if succeeded, otherwise None
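Example (illustrative; ``vm`` is an existing VM instance and the image
path is a placeholder)::
    target = vm.attach_disk("/path/to/swap.img", extra="--persistent")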
"""
# Find the next available target device name.
if target is None:
disks = self.get_disk_devices()
for ch in string.ascii_lowercase:
target = prefix + ch
if target not in disks:
break
result = virsh.attach_disk(self.name, source, target, extra,
uri=self.connect_uri,
ignore_status=ignore_status,
debug=debug)
if result.exit_status:
logging.error("Failed to attach disk %s to VM."
"Detail: %s."
% (source, results_stderr_52lts(result)))
return None
return target
def detach_disk(self, target, extra="",
ignore_status=False, debug=False):
"""
Detach a disk from VM.
:param target: target of disk device need to be detached.
:param extra: additional arguments to command
"""
return virsh.detach_disk(self.name, target, extra,
uri=self.connect_uri,
ignore_status=ignore_status,
debug=debug)
def attach_interface(self, option="", ignore_status=False,
debug=False):
"""
Attach a NIC to VM.
"""
return virsh.attach_interface(self.name, option,
uri=self.connect_uri,
ignore_status=ignore_status,
debug=debug)
def detach_interface(self, option="", ignore_status=False,
debug=False):
"""
Detach a NIC from VM.
"""
return virsh.detach_interface(self.name, option,
uri=self.connect_uri,
ignore_status=ignore_status,
debug=debug)
def destroy(self, gracefully=True, free_mac_addresses=True):
"""
Destroy the VM.
If gracefully is True, first attempt to shutdown the VM with a shell
command. If that fails, send SIGKILL to the qemu process.
:param gracefully: If True, an attempt will be made to end the VM
using a shell command before trying to end the qemu process
with a 'quit' or a kill signal.
:param free_mac_addresses: If vm is undefined with libvirt, also
release/reset associated mac address
"""
try:
# Is it already dead?
if self.is_alive():
logging.debug("Destroying VM")
if self.is_paused():
self.resume()
if (not self.is_lxc() and gracefully and
self.params.get("shutdown_command")):
# Try to destroy with shell command
logging.debug("Trying to shutdown VM with shell command")
try:
session = self.login()
except (remote.LoginError, virt_vm.VMError) as e:
logging.debug(e)
else:
try:
# Send the shutdown command
session.sendline(
self.params.get("shutdown_command"))
logging.debug("Shutdown command sent; waiting for VM "
"to go down...")
if utils_misc.wait_for(self.is_dead, 60, 1, 1):
logging.debug("VM is down")
return
finally:
session.close()
# Destroy the VM directly; 'ignore_status=True' is the default, so destroying
# an already shut-off domain is also acceptable here.
destroy_opt = ''
if gracefully:
destroy_opt = '--graceful'
virsh.destroy(self.name, destroy_opt, uri=self.connect_uri)
finally:
self.cleanup_serial_console()
if free_mac_addresses:
if self.is_persistent():
logging.warning("Requested MAC address release from "
"persistent vm %s. Ignoring." % self.name)
else:
logging.debug("Releasing MAC addresses for vm %s." % self.name)
for nic_name in self.virtnet.nic_name_list():
self.virtnet.free_mac_address(nic_name)
def remove(self):
self.destroy(gracefully=True, free_mac_addresses=False)
# If the current machine contains nvram, we have to set --nvram
if self.params.get("vir_domain_undefine_nvram") == "yes":
options = "--nvram"
else:
options = None
if not self.undefine(options):
raise virt_vm.VMRemoveError("VM '%s' undefine error" % self.name)
self.destroy(gracefully=False, free_mac_addresses=True)
logging.debug("VM '%s' was removed", self.name)
def remove_with_storage(self):
"""
Virsh undefine provides an option named --remove-all-storage, but it
only removes the storage that is managed by libvirt.
This method undefines the vm and removes all storage related to the
vm, whether or not that storage is managed by libvirt.
"""
blklist = list(self.get_disk_devices().values())
self.remove()
for blk in blklist:
path = blk['source']
if os.path.exists(path):
os.remove(path)
def get_uuid(self):
"""
Return VM's UUID.
"""
result = virsh.domuuid(self.name, uri=self.connect_uri)
uuid = results_stdout_52lts(result).strip()
# only overwrite it if it's not set
if self.uuid is None:
self.uuid = uuid
return self.uuid
def get_ifname(self, nic_index=0):
raise NotImplementedError
def get_virsh_mac_address(self, nic_index=0):
"""
Get the MAC of this VM domain.
:param nic_index: Index of the NIC
:raise VMMACAddressMissingError: If no MAC address is defined for the
requested NIC
"""
cmd_result = virsh.dumpxml(self.name, uri=self.connect_uri)
if cmd_result.exit_status:
raise exceptions.TestFail("dumpxml %s failed.\n"
"Detail: %s.\n" % (self.name, cmd_result))
thexml = results_stdout_52lts(cmd_result).strip()
xtf = xml_utils.XMLTreeFile(thexml)
interfaces = xtf.find('devices').findall('interface')
# Range check
try:
mac = interfaces[nic_index].find('mac').get('address')
if mac is not None:
return mac
except IndexError:
pass # Allow other exceptions through
# IndexError (range check) or mac is None
raise virt_vm.VMMACAddressMissingError(nic_index)
def get_mac_address(self, nic_index=0):
"""
Return the MAC address of a NIC.
:param nic_index: Index of the NIC
:return: MAC address of the NIC
:raise VMMACAddressMissingError: If no MAC address is defined for the
requested NIC
"""
try:
return super(VM, self).get_mac_address(nic_index)
except virt_vm.VMMACAddressMissingError:
mac = self.get_virsh_mac_address(nic_index)
self.virtnet.set_mac_address(nic_index, mac)
return mac
def get_pid(self):
"""
Return the VM's PID.
:return: int with PID. If VM is not alive, returns None.
"""
if self.is_lxc():
pid_file = "/var/run/libvirt/lxc/%s.pid" % self.name
elif self.is_qemu():
pid_file = "/var/run/libvirt/qemu/%s.pid" % self.name
elif self.is_esx():
pid_file = "/var/run/libvirt/esx/%s.pid" % self.name
# TODO: Add more vm driver type
else:
raise ValueError("Unsupport connect uri: %s." % self.connect_uri)
pid = None
if os.path.exists(pid_file):
try:
pid_file_contents = open(pid_file).read()
pid = int(pid_file_contents)
except IOError:
logging.error("Could not read %s to get PID", pid_file)
except ValueError:
logging.error("PID file %s has invalid contents: '%s'",
pid_file, pid_file_contents)
else:
logging.debug("PID file %s not present", pid_file)
return pid
def get_vcpus_pid(self):
"""
Return the vcpu PIDs for a given VM.
:return: list of PID of vcpus of a VM.
"""
output = virsh.qemu_monitor_command(self.name, "info cpus", "--hmp",
uri=self.connect_uri)
vcpu_pids = re.findall(r'thread_id=(\d+)',
results_stdout_52lts(output))
return vcpu_pids
def get_shell_pid(self):
"""
Return the PID of the parent shell process.
:note: This works under the assumption that ``self.process.get_pid()``
returns the PID of the parent shell process.
"""
return self.process.get_pid()
def get_shared_meminfo(self):
"""
Returns the VM's shared memory information.
:return: Shared memory used by VM (MB)
"""
if self.is_dead():
logging.error("Could not get shared memory info from dead VM.")
return None
filename = "/proc/%d/statm" % self.get_pid()
shm = int(open(filename).read().split()[2])
# statm reports sizes in pages; assuming 4 KiB pages, convert to MB.
return shm * 4.0 / 1024
|
import os
import sys
import json
import time
import uuid
import base64
import gevent
import itertools
from hashlib import sha256
from datetime import datetime
from collections import namedtuple
from flask import (request, g, Blueprint, make_response, Response,
stream_with_context)
from flask import jsonify as flask_jsonify
from flask.ext.restful import reqparse
from sqlalchemy import asc, func
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm import joinedload, load_only
from inbox.models import (Message, Block, Part, Thread, Namespace,
Contact, Calendar, Event, Transaction,
DataProcessingCache, Category, MessageCategory)
from inbox.models.event import RecurringEvent, RecurringEventOverride
from inbox.models.category import EPOCH
from inbox.models.backends.generic import GenericAccount
from inbox.api.sending import (send_draft, send_raw_mime, send_draft_copy,
update_draft_on_send)
from inbox.api.update import update_message, update_thread
from inbox.api.kellogs import APIEncoder
from inbox.api import filtering
from inbox.api.validation import (valid_account, get_attachments, get_calendar,
get_recipients, get_draft, valid_public_id,
valid_event, valid_event_update, timestamp,
bounded_str, view, strict_parse_args,
limit, offset, ValidatableArgument,
strict_bool, validate_draft_recipients,
valid_delta_object_types, valid_display_name,
noop_event_update, valid_category_type,
comma_separated_email_list,
get_sending_draft)
from inbox.config import config
from inbox.contacts.algorithms import (calculate_contact_scores,
calculate_group_scores,
calculate_group_counts, is_stale)
import inbox.contacts.crud
from inbox.contacts.search import ContactSearchClient
from inbox.sendmail.base import (create_message_from_json, update_draft,
delete_draft, create_draft_from_mime,
SendMailException)
from inbox.ignition import engine_manager
from inbox.models.action_log import schedule_action
from inbox.models.session import new_session, session_scope
from inbox.search.base import get_search_client, SearchBackendException, SearchStoreException
from inbox.transactions import delta_sync
from inbox.api.err import (err, APIException, NotFoundError, InputError,
AccountDoesNotExistError, log_exception)
from inbox.events.ical import generate_rsvp, send_rsvp
from inbox.events.util import removed_participants
from inbox.util import blockstore
from inbox.util.misc import imap_folder_path
from inbox.actions.backends.generic import remote_delete_sent
from inbox.crispin import writable_connection_pool
from inbox.s3.base import get_raw_from_provider
from inbox.s3.exc import (EmailFetchException, TemporaryEmailFetchException,
EmailDeletedException)
from inbox.util.stats import statsd_client
try:
from inbox.util.eas.codes import STORE_STATUS_CODES
except ImportError:
# Only important for EAS search failures, so it shouldn't trigger a test failure
pass
from nylas.logging import get_logger
log = get_logger()
DEFAULT_LIMIT = 100
LONG_POLL_REQUEST_TIMEOUT = 120
LONG_POLL_POLL_INTERVAL = 1
SEND_TIMEOUT = 60
app = Blueprint(
'namespace_api',
__name__,
url_prefix='')
app.log_exception = log_exception
# Configure mimetype -> extension map
# TODO perhaps expand to encompass non-standard mimetypes too
# see python mimetypes library
common_extensions = {}
mt_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'mime_types.txt')
with open(mt_path, 'r') as f:
for x in f:
x = x.strip()
if not x or x.startswith('#'):
continue
m = x.split()
mime_type, extensions = m[0], m[1:]
assert extensions, 'Must have at least one extension per mimetype'
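# Keep only the first listed extension as the canonical one for this MIME type.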
common_extensions[mime_type.lower()] = extensions[0]
if config.get('DEBUG_PROFILING_ON'):
from inbox.util.debug import attach_pyinstrument_profiler
attach_pyinstrument_profiler()
APIFeatures = namedtuple('APIFeatures', ['optimistic_updates'])
# The Nylas API supports versioning to be fully compatible with
# older clients and apps. Users can specify the version of the
# API they want to work with by setting the Api-Version API
# header. API versions are defined as dates and stored in the
# API_VERSIONS list.
API_VERSIONS = ['2016-03-07', '2016-08-09']
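# Illustrative request pinning a version (host is a placeholder):
#   curl -H 'Api-Version: 2016-08-09' https://<api-host>/threads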
@app.before_request
def start():
g.api_version = request.headers.get('Api-Version', API_VERSIONS[0])
if g.api_version not in API_VERSIONS:
g.api_version = API_VERSIONS[0]
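# The original API version applies changes to the datastore optimistically;
# later versions defer the local update until syncback confirms it.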
if g.api_version == API_VERSIONS[0]:
g.api_features = APIFeatures(optimistic_updates=True)
else:
g.api_features = APIFeatures(optimistic_updates=False)
request.environ['log_context'] = {
'endpoint': request.endpoint,
'api_version': g.api_version,
'namespace_id': g.namespace_id,
}
engine = engine_manager.get_for_id(g.namespace_id)
g.db_session = new_session(engine)
g.namespace = Namespace.get(g.namespace_id, g.db_session)
if not g.namespace:
# The only way this can occur is if there used to be an account that
# was deleted, but the API access cache entry has not expired yet.
raise AccountDoesNotExistError()
request.environ['log_context']['account_id'] = g.namespace.account_id
if hasattr(g, 'application_id'):
request.environ['log_context']['application_id'] = g.application_id
is_n1 = request.environ.get('IS_N1', False)
g.encoder = APIEncoder(g.namespace.public_id, is_n1=is_n1)
g.parser = reqparse.RequestParser(argument_class=ValidatableArgument)
g.parser.add_argument('limit', default=DEFAULT_LIMIT, type=limit,
location='args')
g.parser.add_argument('offset', default=0, type=offset, location='args')
@app.before_request
def before_remote_request():
"""
Verify the validity of the account's credentials before performing a
request to the remote server.
The message and thread /search endpoints, and the /send endpoint directly
interact with the remote server. All create, update, delete requests
result in requests to the remote server via action syncback.
"""
# Search uses 'GET', all the other requests we care about use a write
# HTTP method.
if (request.endpoint in ('namespace_api.message_search_api',
'namespace_api.thread_search_api',
'namespace_api.message_streaming_search_api',
'namespace_api.thread_streaming_search_api') or
request.method in ('POST', 'PUT', 'PATCH', 'DELETE')):
if g.namespace:
# Logging provider here to ensure that the provider is only logged for
# requests that modify data or are proxied to remote servers.
request.environ['log_context']['provider'] = \
g.namespace.account.provider
# Disable validation so we can perform requests on paused accounts.
# valid_account(g.namespace)
@app.after_request
def finish(response):
if response.status_code == 200 and hasattr(g, 'db_session'): # be cautious
g.db_session.commit()
if hasattr(g, 'db_session'):
g.db_session.close()
return response
@app.errorhandler(OperationalError)
def handle_operational_error(error):
rule = request.url_rule
if 'send' in rule.rule and 'rsvp' not in rule.rule:
message = "A temporary database error prevented us from serving this request. Your message has NOT been sent. Please try again in a few minutes."
else:
message = "A temporary database error prevented us from serving this request. Please try again."
log.error('MySQL OperationalError', exc_info=True)
response = flask_jsonify(message=message, type='database_error')
response.status_code = 503
return response
@app.errorhandler(NotImplementedError)
def handle_not_implemented_error(error):
request.environ['log_context']['error'] = 'NotImplementedError'
response = flask_jsonify(message="API endpoint not yet implemented",
type='api_error')
response.status_code = 501
return response
@app.errorhandler(APIException)
def handle_input_error(error):
# these "errors" are normal, so we don't need to save a traceback
request.environ['log_context']['error'] = error.__class__.__name__
request.environ['log_context']['error_message'] = error.message
response = flask_jsonify(message=error.message,
type='invalid_request_error')
response.status_code = error.status_code
return response
@app.errorhandler(Exception)
def handle_generic_error(error):
log_exception(sys.exc_info())
response = flask_jsonify(message="An internal error occured. If this issue persists, please contact support@nylas.com and include this request_uid: {}".format(
request.headers.get('X-Unique-ID'), type='api_error'))
response.status_code = 500
return response
@app.route('/account')
def one_account():
g.parser.add_argument('view', type=view, location='args')
args = strict_parse_args(g.parser, request.args)
# Use a new encoder object with the expand parameter set.
encoder = APIEncoder(g.namespace.public_id, args['view'] == 'expanded')
return encoder.jsonify(g.namespace)
#
# Sync status (enable/disable account / throttling)
#
@app.route('/status/', methods=['GET', 'PUT'])
def status():
account = g.namespace.account
# Don't allow resuming accounts marked for deletion.
if account.is_marked_for_deletion:
raise AccountDoesNotExistError()
if request.method == 'PUT':
data = request.get_json(force=True)
if 'sync_should_run' in data:
if data['sync_should_run']:
sync_host = data.get('sync_host', None)
account.enable_sync(sync_host=sync_host)
else:
reason = data.get('disable_reason', None)
account.disable_sync(reason)
if 'throttled' in data:
if data['throttled']:
account.throttled = True
else:
account.throttled = False
return g.encoder.jsonify({
'sync_status': account.sync_status,
'throttled': account.throttled,
})
#
# Threads
#
@app.route('/threads/')
def thread_query_api():
g.parser.add_argument('subject', type=bounded_str, location='args')
g.parser.add_argument('to', type=bounded_str, location='args')
g.parser.add_argument('from', type=bounded_str, location='args')
g.parser.add_argument('cc', type=bounded_str, location='args')
g.parser.add_argument('bcc', type=bounded_str, location='args')
g.parser.add_argument('any_email', type=comma_separated_email_list,
location='args')
g.parser.add_argument('message_id_header', type=bounded_str, location='args')
g.parser.add_argument('started_before', type=timestamp, location='args')
g.parser.add_argument('started_after', type=timestamp, location='args')
g.parser.add_argument('last_message_before', type=timestamp,
location='args')
g.parser.add_argument('last_message_after', type=timestamp,
location='args')
g.parser.add_argument('filename', type=bounded_str, location='args')
g.parser.add_argument('in', type=bounded_str, location='args')
g.parser.add_argument('thread_id', type=valid_public_id, location='args')
g.parser.add_argument('unread', type=strict_bool, location='args')
g.parser.add_argument('starred', type=strict_bool, location='args')
g.parser.add_argument('view', type=view, location='args')
args = strict_parse_args(g.parser, request.args)
threads = filtering.threads(
namespace_id=g.namespace.id,
subject=args['subject'],
thread_public_id=args['thread_id'],
to_addr=args['to'],
from_addr=args['from'],
cc_addr=args['cc'],
bcc_addr=args['bcc'],
any_email=args['any_email'],
message_id_header=args['message_id_header'],
started_before=args['started_before'],
started_after=args['started_after'],
last_message_before=args['last_message_before'],
last_message_after=args['last_message_after'],
filename=args['filename'],
unread=args['unread'],
starred=args['starred'],
in_=args['in'],
limit=args['limit'],
offset=args['offset'],
view=args['view'],
db_session=g.db_session)
# Use a new encoder object with the expand parameter set.
encoder = APIEncoder(g.namespace.public_id,
args['view'] == 'expanded')
return encoder.jsonify(threads)
@app.route('/threads/search', methods=['GET'])
def thread_search_api():
g.parser.add_argument('q', type=bounded_str, location='args')
args = strict_parse_args(g.parser, request.args)
if not args['q']:
err_string = ('GET HTTP method must include query'
' url parameter')
raise InputError(err_string)
try:
search_client = get_search_client(g.namespace.account)
results = search_client.search_threads(g.db_session, args['q'],
offset=args['offset'],
limit=args['limit'])
return g.encoder.jsonify(results)
except SearchBackendException as exc:
kwargs = {}
if exc.server_error:
kwargs['server_error'] = exc.server_error
return err(exc.http_code, exc.message, **kwargs)
except SearchStoreException as exc:
store_status = STORE_STATUS_CODES.get(str(exc.err_code))
kwargs = {}
if store_status.requires_user_action:
kwargs['server_error'] = store_status.resolution
return err(store_status.http_code, store_status.meaning, **kwargs)
@app.route('/threads/search/streaming', methods=['GET'])
def thread_streaming_search_api():
g.parser.add_argument('q', type=bounded_str, location='args')
args = strict_parse_args(g.parser, request.args)
if not args['q']:
err_string = 'GET HTTP method must include query url parameter'
raise InputError(err_string)
try:
search_client = get_search_client(g.namespace.account)
generator = search_client.stream_threads(args['q'])
return Response(stream_with_context(generator()),
mimetype='text/json-stream')
except SearchBackendException as exc:
kwargs = {}
if exc.server_error:
kwargs['server_error'] = exc.server_error
return err(exc.http_code, exc.message, **kwargs)
except SearchStoreException as exc:
store_status = STORE_STATUS_CODES.get(str(exc.err_code))
kwargs = {}
if store_status.requires_user_action:
kwargs['server_error'] = store_status.resolution
return err(store_status.http_code, store_status.meaning, **kwargs)
@app.route('/threads/<public_id>')
def thread_api(public_id):
g.parser.add_argument('view', type=view, location='args')
args = strict_parse_args(g.parser, request.args)
# Use a new encoder object with the expand parameter set.
encoder = APIEncoder(g.namespace.public_id, args['view'] == 'expanded')
try:
valid_public_id(public_id)
thread = g.db_session.query(Thread).filter(
Thread.public_id == public_id, # noqa
Thread.deleted_at == None, # noqa
Thread.namespace_id == g.namespace.id).one()
return encoder.jsonify(thread)
except NoResultFound:
raise NotFoundError("Couldn't find thread `{0}`".format(public_id))
#
# Update thread
#
@app.route('/threads/<public_id>', methods=['PUT', 'PATCH'])
def thread_api_update(public_id):
try:
valid_public_id(public_id)
thread = g.db_session.query(Thread).filter(
Thread.public_id == public_id, # noqa
Thread.deleted_at == None, # noqa
Thread.namespace_id == g.namespace.id).one()
except NoResultFound:
raise NotFoundError("Couldn't find thread `{0}` ".format(public_id))
data = request.get_json(force=True)
if not isinstance(data, dict):
raise InputError('Invalid request body')
update_thread(thread, data, g.db_session,
g.api_features.optimistic_updates)
return g.encoder.jsonify(thread)
#
# Delete thread
#
@app.route('/threads/<public_id>', methods=['DELETE'])
def thread_api_delete(public_id):
""" Moves the thread to the trash """
raise NotImplementedError
##
# Messages
##
@app.route('/messages/')
def message_query_api():
g.parser.add_argument('subject', type=bounded_str, location='args')
g.parser.add_argument('to', type=bounded_str, location='args')
g.parser.add_argument('from', type=bounded_str, location='args')
g.parser.add_argument('cc', type=bounded_str, location='args')
g.parser.add_argument('bcc', type=bounded_str, location='args')
g.parser.add_argument('any_email', type=comma_separated_email_list,
location='args')
g.parser.add_argument('started_before', type=timestamp, location='args')
g.parser.add_argument('started_after', type=timestamp, location='args')
g.parser.add_argument('last_message_before', type=timestamp,
location='args')
g.parser.add_argument('last_message_after', type=timestamp,
location='args')
g.parser.add_argument('received_before', type=timestamp,
location='args')
g.parser.add_argument('received_after', type=timestamp,
location='args')
g.parser.add_argument('filename', type=bounded_str, location='args')
g.parser.add_argument('in', type=bounded_str, location='args')
g.parser.add_argument('thread_id', type=valid_public_id, location='args')
g.parser.add_argument('unread', type=strict_bool, location='args')
g.parser.add_argument('starred', type=strict_bool, location='args')
g.parser.add_argument('view', type=view, location='args')
args = strict_parse_args(g.parser, request.args)
messages = filtering.messages_or_drafts(
namespace_id=g.namespace.id,
drafts=False,
subject=args['subject'],
thread_public_id=args['thread_id'],
to_addr=args['to'],
from_addr=args['from'],
cc_addr=args['cc'],
bcc_addr=args['bcc'],
any_email=args['any_email'],
started_before=args['started_before'],
started_after=args['started_after'],
last_message_before=args['last_message_before'],
last_message_after=args['last_message_after'],
received_before=args['received_before'],
received_after=args['received_after'],
filename=args['filename'],
in_=args['in'],
unread=args['unread'],
starred=args['starred'],
limit=args['limit'],
offset=args['offset'],
view=args['view'],
db_session=g.db_session)
# Use a new encoder object with the expand parameter set.
encoder = APIEncoder(g.namespace.public_id, args['view'] == 'expanded')
return encoder.jsonify(messages)
@app.route('/messages/search', methods=['GET'])
def message_search_api():
g.parser.add_argument('q', type=bounded_str, location='args')
args = strict_parse_args(g.parser, request.args)
if not args['q']:
err_string = 'GET HTTP method must include query url parameter'
raise InputError(err_string)
try:
search_client = get_search_client(g.namespace.account)
results = search_client.search_messages(g.db_session, args['q'],
offset=args['offset'],
limit=args['limit'])
return g.encoder.jsonify(results)
except SearchBackendException as exc:
kwargs = {}
if exc.server_error:
kwargs['server_error'] = exc.server_error
return err(exc.http_code, exc.message, **kwargs)
except SearchStoreException as exc:
store_status = STORE_STATUS_CODES.get(str(exc.err_code))
kwargs = {}
if store_status.requires_user_action:
kwargs['server_error'] = store_status.resolution
return err(store_status.http_code, store_status.meaning, **kwargs)
@app.route('/messages/search/streaming', methods=['GET'])
def message_streaming_search_api():
g.parser.add_argument('q', type=bounded_str, location='args')
args = strict_parse_args(g.parser, request.args)
if not args['q']:
err_string = 'GET HTTP method must include query url parameter'
raise InputError(err_string)
try:
search_client = get_search_client(g.namespace.account)
generator = search_client.stream_messages(args['q'])
return Response(stream_with_context(generator()),
mimetype='text/json-stream')
except SearchBackendException as exc:
kwargs = {}
if exc.server_error:
kwargs['server_error'] = exc.server_error
return err(exc.http_code, exc.message, **kwargs)
except SearchStoreException as exc:
store_status = STORE_STATUS_CODES.get(str(exc.err_code))
kwargs = {}
if store_status.requires_user_action:
kwargs['server_error'] = store_status.resolution
return err(store_status.http_code, store_status.meaning, **kwargs)
@app.route('/messages/<public_id>', methods=['GET'])
def message_read_api(public_id):
g.parser.add_argument('view', type=view, location='args')
args = strict_parse_args(g.parser, request.args)
encoder = APIEncoder(g.namespace.public_id, args['view'] == 'expanded')
try:
valid_public_id(public_id)
message = Message.from_public_id(public_id, g.namespace.id,
g.db_session)
except NoResultFound:
raise NotFoundError("Couldn't find message {0}".format(public_id))
if request.headers.get('Accept', None) == 'message/rfc822':
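# Raw MIME was requested: serve it from the local block store, falling back
# to fetching it directly from the email provider.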
raw_message = blockstore.get_from_blockstore(message.data_sha256)
if raw_message is not None:
return Response(raw_message, mimetype='message/rfc822')
else:
# Try getting the message from the email provider.
account = g.namespace.account
statsd_string = 'api.direct_fetching.{}.{}'\
.format(account.provider, account.id)
try:
with statsd_client.timer('{}.provider_latency'.format(
statsd_string)):
contents = get_raw_from_provider(message)
statsd_client.incr('{}.successes'.format(statsd_string))
except TemporaryEmailFetchException:
statsd_client.incr(
'{}.temporary_failure'.format(statsd_string))
log.warning('Exception when fetching email',
account_id=account.id, provider=account.provider,
logstash_tag='direct_fetching', exc_info=True)
return err(503, "Email server returned a temporary error. "
"Please try again in a few minutes.")
except EmailDeletedException:
statsd_client.incr('{}.deleted'.format(statsd_string))
log.warning('Exception when fetching email',
account_id=account.id, provider=account.provider,
logstash_tag='direct_fetching', exc_info=True)
return err(404, "The data was deleted on the email server.")
except EmailFetchException:
statsd_client.incr('{}.failures'.format(statsd_string))
log.warning('Exception when fetching email',
account_id=account.id, provider=account.provider,
logstash_tag='direct_fetching', exc_info=True)
return err(404, "Couldn't find data on the email server.")
if contents is not None:
# If we found it, save it too.
data_sha256 = sha256(contents).hexdigest()
blockstore.save_to_blockstore(data_sha256, contents)
return contents
request.environ['log_context']['message_id'] = message.id
raise NotFoundError(
"Couldn't find raw contents for message `{0}`. "
"Please try again in a few minutes."
.format(public_id))
return encoder.jsonify(message)
@app.route('/messages/<public_id>', methods=['PUT', 'PATCH'])
def message_update_api(public_id):
try:
valid_public_id(public_id)
message = g.db_session.query(Message).filter(
Message.public_id == public_id,
Message.namespace_id == g.namespace.id).one()
except NoResultFound:
raise NotFoundError("Couldn't find message {0} ".format(public_id))
data = request.get_json(force=True)
if not isinstance(data, dict):
raise InputError('Invalid request body')
update_message(message, data, g.db_session,
g.api_features.optimistic_updates)
return g.encoder.jsonify(message)
# Folders / Labels
@app.route('/folders')
@app.route('/labels')
def folders_labels_query_api():
category_type = g.namespace.account.category_type
rule = request.url_rule.rule
valid_category_type(category_type, rule)
g.parser.add_argument('view', type=bounded_str, location='args')
args = strict_parse_args(g.parser, request.args)
if args['view'] == 'count':
results = g.db_session.query(func.count(Category.id))
elif args['view'] == 'ids':
results = g.db_session.query(Category.public_id)
else:
results = g.db_session.query(Category)
results = results.filter(Category.namespace_id == g.namespace.id,
Category.deleted_at == EPOCH) # noqa
results = results.order_by(asc(Category.id))
if args['view'] == 'count':
return g.encoder.jsonify({"count": results.scalar()})
results = results.limit(args['limit']).offset(args['offset']).all()
if args['view'] == 'ids':
return g.encoder.jsonify([r for r, in results])
return g.encoder.jsonify(results)
@app.route('/folders/<public_id>')
def folder_api(public_id):
return folders_labels_api_impl(public_id)
@app.route('/labels/<public_id>')
def label_api(public_id):
return folders_labels_api_impl(public_id)
def folders_labels_api_impl(public_id):
category_type = g.namespace.account.category_type
rule = request.url_rule.rule
valid_category_type(category_type, rule)
valid_public_id(public_id)
try:
category = g.db_session.query(Category).filter(
Category.namespace_id == g.namespace.id,
Category.public_id == public_id,
Category.deleted_at == EPOCH).one() # noqa
except NoResultFound:
raise NotFoundError('Object not found')
return g.encoder.jsonify(category)
@app.route('/folders', methods=['POST'])
@app.route('/labels', methods=['POST'])
def folders_labels_create_api():
category_type = g.namespace.account.category_type
rule = request.url_rule.rule
valid_category_type(category_type, rule)
data = request.get_json(force=True)
display_name = data.get('display_name')
# Validates the display_name and checks if there is a non-deleted Category
# with this display_name already. If so, we do not allow creating a
# duplicate.
valid_display_name(g.namespace.id, category_type, display_name,
g.db_session)
if g.namespace.account.provider not in ['gmail', 'eas']:
# Translate the name of the folder to an actual IMAP name
# (e.g: "Accounting/Taxes" becomes "Accounting.Taxes")
display_name = imap_folder_path(
display_name,
separator=g.namespace.account.folder_separator,
prefix=g.namespace.account.folder_prefix)
category = Category.find_or_create(g.db_session, g.namespace.id,
name=None, display_name=display_name,
type_=category_type)
if category.is_deleted:
# The existing category is soft-deleted and will be hard-deleted,
# so it is okay to create a new category with the same (display_name,
# name).
# NOTE: We do not simply "undelete" the existing category, by setting
# its `deleted_at`=EPOCH, because doing so would not be consistent with
# the API's semantics -- we want the newly created object to have a
# different ID.
category = Category.create(g.db_session, namespace_id=g.namespace.id,
name=None, display_name=display_name,
type_=category_type)
g.db_session.add(category)
g.db_session.flush()
if category_type == 'folder':
schedule_action('create_folder', category, g.namespace.id,
g.db_session)
else:
schedule_action('create_label', category, g.namespace.id, g.db_session)
return g.encoder.jsonify(category)
@app.route('/folders/<public_id>', methods=['PUT', 'PATCH'])
@app.route('/labels/<public_id>', methods=['PUT', 'PATCH'])
def folder_label_update_api(public_id):
category_type = g.namespace.account.category_type
rule = request.url_rule.rule
valid_category_type(category_type, rule)
valid_public_id(public_id)
try:
category = g.db_session.query(Category).filter(
Category.namespace_id == g.namespace.id,
Category.public_id == public_id,
Category.deleted_at == EPOCH).one() # noqa
except NoResultFound:
raise InputError("Couldn't find {} {}".format(
category_type, public_id))
if category.name:
raise InputError("Cannot modify a standard {}".format(category_type))
data = request.get_json(force=True)
display_name = data.get('display_name')
valid_display_name(g.namespace.id, category_type, display_name,
g.db_session)
if g.namespace.account.provider not in ['gmail', 'eas']:
# Translate the name of the folder to an actual IMAP name
# (e.g: "Accounting/Taxes" becomes "Accounting.Taxes")
display_name = imap_folder_path(
display_name,
separator=g.namespace.account.folder_separator,
prefix=g.namespace.account.folder_prefix)
current_name = category.display_name
if g.api_features.optimistic_updates:
# Update optimistically.
category.display_name = display_name
g.db_session.flush()
if category_type == 'folder':
schedule_action('update_folder', category, g.namespace.id,
g.db_session, old_name=current_name,
new_name=display_name)
else:
schedule_action('update_label', category, g.namespace.id,
g.db_session, old_name=current_name,
new_name=display_name)
return g.encoder.jsonify(category)
@app.route('/folders/<public_id>', methods=['DELETE'])
@app.route('/labels/<public_id>', methods=['DELETE'])
def folder_label_delete_api(public_id):
category_type = g.namespace.account.category_type
rule = request.url_rule.rule
valid_category_type(category_type, rule)
valid_public_id(public_id)
try:
category = g.db_session.query(Category).filter(
Category.namespace_id == g.namespace.id,
Category.public_id == public_id,
Category.deleted_at == EPOCH).one() # noqa
except NoResultFound:
raise InputError("Couldn't find {} {}".format(
category_type, public_id))
if category.name:
raise InputError("Cannot modify a standard {}".format(category_type))
if category.type_ == 'folder':
messages_with_category = g.db_session.query(MessageCategory).filter(
MessageCategory.category_id == category.id).exists()
messages_exist = g.db_session.query(messages_with_category).scalar()
if messages_exist:
raise InputError(
"Folder {} cannot be deleted because it contains messages.".
format(public_id))
if g.api_features.optimistic_updates:
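# Soft-delete the category and its folders locally so the change is visible
# immediately; syncback performs the deletion on the backend.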
deleted_at = datetime.utcnow()
category.deleted_at = deleted_at
folders = category.folders if g.namespace.account.discriminator \
!= 'easaccount' else category.easfolders
for folder in folders:
folder.deleted_at = deleted_at
schedule_action('delete_folder', category, g.namespace.id,
g.db_session)
else:
if g.api_features.optimistic_updates:
deleted_at = datetime.utcnow()
category.deleted_at = deleted_at
for label in category.labels:
label.deleted_at = deleted_at
schedule_action('delete_label', category, g.namespace.id,
g.db_session)
g.db_session.commit()
return g.encoder.jsonify(None)
#
# Contacts
##
@app.route('/contacts/', methods=['GET'])
def contact_api():
g.parser.add_argument('filter', type=bounded_str, default='',
location='args')
g.parser.add_argument('view', type=bounded_str, location='args')
args = strict_parse_args(g.parser, request.args)
if args['view'] == 'count':
results = g.db_session.query(func.count(Contact.id))
elif args['view'] == 'ids':
results = g.db_session.query(Contact.public_id)
else:
results = g.db_session.query(Contact)
results = results.filter(Contact.namespace_id == g.namespace.id)
if args['filter']:
results = results.filter(Contact.email_address == args['filter'])
results = results.with_hint(
Contact, 'USE INDEX (idx_namespace_created)')\
.order_by(asc(Contact.created_at))
if args['view'] == 'count':
return g.encoder.jsonify({"count": results.scalar()})
if args['view'] != 'ids':
results = results.options(load_only('public_id', '_raw_address', 'name'),
joinedload(Contact.phone_numbers))
results = results.limit(args['limit']).offset(args['offset']).all()
if args['view'] == 'ids':
return g.encoder.jsonify([r for r, in results])
return g.encoder.jsonify(results)
@app.route('/contacts/search', methods=['GET'])
def contact_search_api():
g.parser.add_argument('q', type=bounded_str, location='args')
args = strict_parse_args(g.parser, request.args)
if not args['q']:
err_string = ('GET HTTP method must include query'
' url parameter')
raise InputError(err_string)
search_client = ContactSearchClient(g.namespace.id)
results = search_client.search_contacts(g.db_session, args['q'],
offset=args['offset'],
limit=args['limit'])
return g.encoder.jsonify(results)
@app.route('/contacts/<public_id>', methods=['GET'])
def contact_read_api(public_id):
# Get all data for an existing contact.
valid_public_id(public_id)
result = inbox.contacts.crud.read(g.namespace, g.db_session, public_id)
if result is None:
raise NotFoundError("Couldn't find contact {0}".format(public_id))
return g.encoder.jsonify(result)
##
# Events
##
@app.route('/events/', methods=['GET'])
def event_api():
g.parser.add_argument('event_id', type=valid_public_id, location='args')
g.parser.add_argument('calendar_id', type=valid_public_id, location='args')
g.parser.add_argument('title', type=bounded_str, location='args')
g.parser.add_argument('description', type=bounded_str, location='args')
g.parser.add_argument('location', type=bounded_str, location='args')
g.parser.add_argument('busy', type=strict_bool, location='args')
g.parser.add_argument('starts_before', type=timestamp, location='args')
g.parser.add_argument('starts_after', type=timestamp, location='args')
g.parser.add_argument('ends_before', type=timestamp, location='args')
g.parser.add_argument('ends_after', type=timestamp, location='args')
g.parser.add_argument('view', type=bounded_str, location='args')
g.parser.add_argument('expand_recurring', type=strict_bool,
location='args')
g.parser.add_argument('show_cancelled', type=strict_bool, location='args')
g.parser.add_argument('title_email', type=bounded_str, location='args')
g.parser.add_argument('description_email', type=bounded_str, location='args')
g.parser.add_argument('owner_email', type=bounded_str, location='args')
g.parser.add_argument('participant_email', type=bounded_str, location='args')
g.parser.add_argument('any_email', type=bounded_str, location='args')
args = strict_parse_args(g.parser, request.args)
results = filtering.events(
namespace_id=g.namespace.id,
event_public_id=args['event_id'],
calendar_public_id=args['calendar_id'],
title=args['title'],
description=args['description'],
location=args['location'],
busy=args['busy'],
title_email=args['title_email'],
description_email=args['description_email'],
owner_email=args['owner_email'],
participant_email=args['participant_email'],
any_email=args['any_email'],
starts_before=args['starts_before'],
starts_after=args['starts_after'],
ends_before=args['ends_before'],
ends_after=args['ends_after'],
limit=args['limit'],
offset=args['offset'],
view=args['view'],
expand_recurring=args['expand_recurring'],
show_cancelled=args['show_cancelled'],
db_session=g.db_session)
return g.encoder.jsonify(results)
@app.route('/events/', methods=['POST'])
def event_create_api():
g.parser.add_argument('notify_participants', type=strict_bool,
location='args')
args = strict_parse_args(g.parser, request.args)
notify_participants = args['notify_participants']
data = request.get_json(force=True)
calendar = get_calendar(data.get('calendar_id'),
g.namespace, g.db_session)
if calendar.read_only:
raise InputError("Can't create events on read_only calendar.")
valid_event(data)
title = data.get('title', '')
description = data.get('description')
location = data.get('location')
when = data.get('when')
busy = data.get('busy')
# client libraries can send explicit key = None automagically
if busy is None:
busy = True
participants = data.get('participants')
if participants is None:
participants = []
for p in participants:
p['email'] = p['email'].lower()
if 'status' not in p:
p['status'] = 'noreply'
event = Event(
calendar=calendar,
namespace=g.namespace,
uid=uuid.uuid4().hex,
provider_name=g.namespace.account.provider,
raw_data='',
title=title,
description=description,
location=location,
busy=busy,
when=when,
read_only=False,
is_owner=True,
participants=participants,
sequence_number=0,
source='local')
g.db_session.add(event)
g.db_session.flush()
schedule_action('create_event', event, g.namespace.id, g.db_session,
calendar_uid=event.calendar.uid,
notify_participants=notify_participants)
return g.encoder.jsonify(event)
@app.route('/events/<public_id>', methods=['GET'])
def event_read_api(public_id):
"""Get all data for an existing event."""
valid_public_id(public_id)
try:
event = g.db_session.query(Event).filter(
Event.namespace_id == g.namespace.id,
Event.public_id == public_id,
Event.deleted_at == None).one() # noqa
except NoResultFound:
raise NotFoundError("Couldn't find event id {0}".format(public_id))
return g.encoder.jsonify(event)
@app.route('/events/<public_id>', methods=['PUT', 'PATCH'])
def event_update_api(public_id):
g.parser.add_argument('notify_participants', type=strict_bool,
location='args')
args = strict_parse_args(g.parser, request.args)
notify_participants = args['notify_participants']
valid_public_id(public_id)
try:
event = g.db_session.query(Event).filter(
Event.public_id == public_id,
Event.namespace_id == g.namespace.id,
Event.deleted_at == None).one() # noqa
except NoResultFound:
raise NotFoundError("Couldn't find event {0}".format(public_id))
# iCalendar-imported files are read-only by default but let's give a
# slightly more helpful error message.
if event.calendar == g.namespace.account.emailed_events_calendar:
raise InputError(
'Can not update an event imported from an iCalendar file.')
if event.read_only:
raise InputError('Cannot update read_only event.')
if (isinstance(event, RecurringEvent) or
isinstance(event, RecurringEventOverride)):
raise InputError('Cannot update a recurring event yet.')
data = request.get_json(force=True)
account = g.namespace.account
valid_event_update(data, g.namespace, g.db_session)
# A list of participants we need to send cancellation invites to.
cancelled_participants = []
if 'participants' in data:
for p in data['participants']:
p['email'] = p['email'].lower()
if 'status' not in p:
p['status'] = 'noreply'
cancelled_participants = removed_participants(event.participants,
data['participants'])
# We're going to save this data into a JSON-like TEXT field in the
# db. With MySQL, this means that the column will be 64k.
# Drop the latest participants until it fits in the column.
while len(json.dumps(cancelled_participants)) > 63000:
log.warning("Truncating cancelled participants", cancelled_participants=cancelled_participants)
cancelled_participants.pop()
# Don't update an event if we don't need to.
if noop_event_update(event, data):
return g.encoder.jsonify(event)
if g.api_features.optimistic_updates:
for attr in Event.API_MODIFIABLE_FIELDS:
if attr in data:
setattr(event, attr, data[attr])
event.sequence_number += 1
g.db_session.commit()
schedule_action('update_event', event, g.namespace.id, g.db_session,
calendar_uid=event.calendar.uid,
cancelled_participants=cancelled_participants,
notify_participants=notify_participants)
else:
# This isn't an optimistic update, so we need to store the
# updated attributes inside the ActionLog entry.
# Once we've updated the event on the backend, we'll be able
# to propagate the changes to our datastore.
kwargs = dict(calendar_uid=event.calendar.uid,
event_data=data,
cancelled_participants=cancelled_participants,
notify_participants=notify_participants)
if len(json.dumps(kwargs)) > 2 ** 16 - 12:
raise InputError(
'Event update too big --- please break it in parts.')
if event.calendar != account.emailed_events_calendar:
schedule_action('update_event', event, g.namespace.id, g.db_session,
**kwargs)
return g.encoder.jsonify(event)
@app.route('/events/<public_id>', methods=['DELETE'])
def event_delete_api(public_id):
g.parser.add_argument('notify_participants', type=strict_bool,
location='args')
args = strict_parse_args(g.parser, request.args)
notify_participants = args['notify_participants']
valid_public_id(public_id)
try:
event = g.db_session.query(Event).filter(
Event.public_id == public_id,
Event.namespace_id == g.namespace.id,
Event.deleted_at == None).one() # noqa
except NoResultFound:
raise NotFoundError("Couldn't find event {0}".format(public_id))
if event.calendar == g.namespace.account.emailed_events_calendar:
raise InputError(
            'Can not delete an event imported from an iCalendar file.')
if event.calendar.read_only:
raise InputError('Cannot delete event {} from read_only calendar.'.
format(public_id))
if g.api_features.optimistic_updates:
# Set the local event status to 'cancelled' rather than deleting it,
# in order to be consistent with how we sync deleted events from the
        # remote, and consequently return them through the events and delta
        # sync APIs.
event.sequence_number += 1
event.status = 'cancelled'
g.db_session.commit()
schedule_action('delete_event', event, g.namespace.id, g.db_session,
event_uid=event.uid, calendar_name=event.calendar.name,
calendar_uid=event.calendar.uid,
notify_participants=notify_participants)
return g.encoder.jsonify(None)
@app.route('/send-rsvp', methods=['POST'])
def event_rsvp_api():
data = request.get_json(force=True)
event_id = data.get('event_id')
valid_public_id(event_id)
try:
event = g.db_session.query(Event).filter(
Event.public_id == event_id,
Event.namespace_id == g.namespace.id).one()
except NoResultFound:
raise NotFoundError("Couldn't find event {0}".format(event_id))
if event.message is None:
raise InputError('This is not a message imported '
'from an iCalendar invite.')
status = data.get('status')
if not status:
raise InputError('You must define a status to RSVP.')
if status not in ['yes', 'no', 'maybe']:
raise InputError('Invalid status %s' % status)
comment = data.get('comment', '')
# Note: this assumes that the email invite was directly addressed to us
# (i.e: that there's no email alias to redirect ben.bitdiddle@nylas
# to ben@nylas.)
participants = {p["email"]: p for p in event.participants}
account = g.namespace.account
email = account.email_address
if email not in participants:
raise InputError('Cannot find %s among the participants' % email)
p = participants[email]
# Make this API idempotent.
if p["status"] == status:
if 'comment' not in p and 'comment' not in data:
return g.encoder.jsonify(event)
elif ('comment' in p and 'comment' in data and
p['comment'] == data['comment']):
return g.encoder.jsonify(event)
participant = {"email": email, "status": status, "comment": comment}
body_text = comment
ical_data = generate_rsvp(event, participant, account)
if ical_data is None:
raise APIException("Couldn't parse the attached iCalendar invite")
try:
send_rsvp(ical_data, event, body_text, status, account)
except SendMailException as exc:
kwargs = {}
if exc.failures:
kwargs['failures'] = exc.failures
if exc.server_error:
kwargs['server_error'] = exc.server_error
return err(exc.http_code, exc.message, **kwargs)
# Update the participants status too.
new_participants = []
for participant in event.participants:
email = participant.get("email")
if email is not None and email == account.email_address:
participant["status"] = status
if comment != "":
participant["comment"] = comment
new_participants.append(participant)
event.participants = []
for participant in new_participants:
event.participants.append(participant)
g.db_session.commit()
return g.encoder.jsonify(event)
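# Example request body for /send-rsvp (illustrative only; field names are the
# ones read by the handler above, the event id is a placeholder):
#   POST /send-rsvp
#   {"event_id": "<event public id>", "status": "yes", "comment": "See you there"}
# status must be one of 'yes', 'no' or 'maybe'; comment is optional.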
#
# Files
#
@app.route('/files/', methods=['GET'])
def files_api():
g.parser.add_argument('filename', type=bounded_str, location='args')
g.parser.add_argument('message_id', type=valid_public_id, location='args')
g.parser.add_argument('content_type', type=bounded_str, location='args')
g.parser.add_argument('view', type=view, location='args')
args = strict_parse_args(g.parser, request.args)
files = filtering.files(
namespace_id=g.namespace.id,
message_public_id=args['message_id'],
filename=args['filename'],
content_type=args['content_type'],
limit=args['limit'],
offset=args['offset'],
view=args['view'],
db_session=g.db_session)
return g.encoder.jsonify(files)
@app.route('/files/<public_id>', methods=['GET'])
def file_read_api(public_id):
valid_public_id(public_id)
try:
f = g.db_session.query(Block).filter(
Block.public_id == public_id,
Block.namespace_id == g.namespace.id).one()
return g.encoder.jsonify(f)
except NoResultFound:
raise NotFoundError("Couldn't find file {0} ".format(public_id))
@app.route('/files/<public_id>', methods=['DELETE'])
def file_delete_api(public_id):
valid_public_id(public_id)
try:
f = g.db_session.query(Block).filter(
Block.public_id == public_id,
Block.namespace_id == g.namespace.id).one()
if g.db_session.query(Block).join(Part) \
.filter(Block.public_id == public_id).first() is not None:
raise InputError("Can't delete file that is attachment.")
g.db_session.delete(f)
g.db_session.commit()
# This is essentially what our other API endpoints do after deleting.
# Effectively no error == success
return g.encoder.jsonify(None)
except NoResultFound:
raise NotFoundError("Couldn't find file {0} ".format(public_id))
#
# Upload file API. This actually supports multiple files at once
# You can test with
# $ curl http://localhost:5555/n/4s4iz36h36w17kumggi36ha2b/files \
# --form upload=@dancingbaby.gif
@app.route('/files/', methods=['POST'])
def file_upload_api():
all_files = []
for name, uploaded in request.files.iteritems():
request.environ['log_context'].setdefault('filenames', []).append(name)
f = Block()
f.namespace = g.namespace
f.content_type = uploaded.content_type
f.filename = uploaded.filename
f.data = uploaded.read()
all_files.append(f)
g.db_session.add_all(all_files)
g.db_session.commit() # to generate public_ids
return g.encoder.jsonify(all_files)
#
# File downloads
#
@app.route('/files/<public_id>/download')
def file_download_api(public_id):
valid_public_id(public_id)
try:
f = g.db_session.query(Block).filter(
Block.public_id == public_id,
Block.namespace_id == g.namespace.id).one()
except NoResultFound:
raise NotFoundError("Couldn't find file {0} ".format(public_id))
# Here we figure out the filename.extension given the
# properties which were set on the original attachment
# TODO consider using werkzeug.secure_filename to sanitize?
if f.content_type:
ct = f.content_type.lower()
else:
# TODO Detect the content-type using the magic library
# and set ct = the content type, which is used below
request.environ['log_context']['no_content_type'] = True
ct = 'text/plain'
request.environ['log_context']['content_type'] = ct
if f.filename:
name = f.filename
else:
request.environ['log_context']['no_filename'] = True
if ct in common_extensions:
name = 'attachment.{0}'.format(common_extensions[ct])
else:
# HACK just append the major part of the content type
name = 'attachment.{0}'.format(ct.split('/')[0])
# TODO the part.data object should really behave like a stream we can read
# & write to
try:
account = g.namespace.account
statsd_string = 'api.direct_fetching.{}.{}'.format(account.provider,
account.id)
response = make_response(f.data)
statsd_client.incr('{}.successes'.format(statsd_string))
except TemporaryEmailFetchException:
statsd_client.incr('{}.temporary_failure'.format(statsd_string))
log.warning('Exception when fetching email',
account_id=account.id, provider=account.provider,
logstash_tag='direct_fetching', exc_info=True)
return err(503, "Email server returned a temporary error. "
"Please try again in a few minutes.")
except EmailDeletedException:
statsd_client.incr('{}.deleted'.format(statsd_string))
log.warning('Exception when fetching email',
account_id=account.id, provider=account.provider,
logstash_tag='direct_fetching', exc_info=True)
return err(404, "The data was deleted on the email server.")
except EmailFetchException:
statsd_client.incr('{}.failures'.format(statsd_string))
log.warning('Exception when fetching email',
logstash_tag='direct_fetching', exc_info=True)
return err(404, "Couldn't find data on email server.")
response.headers['Content-Type'] = 'application/octet-stream' # ct
# Werkzeug will try to encode non-ascii header values as latin-1. Try that
# first; if it fails, use RFC2047/MIME encoding. See
# https://tools.ietf.org/html/rfc7230#section-3.2.4.
try:
name = name.encode('latin-1')
except UnicodeEncodeError:
name = '=?utf-8?b?' + base64.b64encode(name.encode('utf-8')) + '?='
response.headers['Content-Disposition'] = \
'attachment; filename={0}'.format(name)
request.environ['log_context']['headers'] = response.headers
return response
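# Minimal sketch of the Content-Disposition filename fallback used above: try
# latin-1 first and fall back to RFC 2047 base64 encoding otherwise. This is an
# illustrative helper, not something the endpoint itself calls.
def _encode_disposition_filename(name):
    """Return a header-safe representation of a filename."""
    try:
        return name.encode('latin-1')
    except UnicodeEncodeError:
        return '=?utf-8?b?' + base64.b64encode(name.encode('utf-8')) + '?='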
##
# Calendars
##
@app.route('/calendars/', methods=['GET'])
def calendar_api():
g.parser.add_argument('view', type=view, location='args')
args = strict_parse_args(g.parser, request.args)
if args['view'] == 'count':
query = g.db_session.query(func.count(Calendar.id))
elif args['view'] == 'ids':
query = g.db_session.query(Calendar.public_id)
else:
query = g.db_session.query(Calendar)
results = query.filter(Calendar.namespace_id == g.namespace.id). \
order_by(asc(Calendar.id))
if args['view'] == 'count':
return g.encoder.jsonify({"count": results.scalar()})
results = results.limit(args['limit']).offset(args['offset']).all()
if args['view'] == 'ids':
return g.encoder.jsonify([r for r, in results])
return g.encoder.jsonify(results)
@app.route('/calendars/<public_id>', methods=['GET'])
def calendar_read_api(public_id):
"""Get all data for an existing calendar."""
valid_public_id(public_id)
try:
calendar = g.db_session.query(Calendar).filter(
Calendar.public_id == public_id,
Calendar.namespace_id == g.namespace.id).one()
except NoResultFound:
raise NotFoundError("Couldn't find calendar {0}".format(public_id))
return g.encoder.jsonify(calendar)
##
# Drafts
##
# TODO(emfree, kavya): Systematically validate user input, and return
# meaningful errors for invalid input.
@app.route('/drafts/', methods=['GET'])
def draft_query_api():
g.parser.add_argument('subject', type=bounded_str, location='args')
g.parser.add_argument('to', type=bounded_str, location='args')
g.parser.add_argument('cc', type=bounded_str, location='args')
g.parser.add_argument('bcc', type=bounded_str, location='args')
g.parser.add_argument('any_email', type=comma_separated_email_list,
location='args')
g.parser.add_argument('started_before', type=timestamp, location='args')
g.parser.add_argument('started_after', type=timestamp, location='args')
g.parser.add_argument('last_message_before', type=timestamp,
location='args')
g.parser.add_argument('last_message_after', type=timestamp,
location='args')
g.parser.add_argument('received_before', type=timestamp,
location='args')
g.parser.add_argument('received_after', type=timestamp,
location='args')
g.parser.add_argument('filename', type=bounded_str, location='args')
g.parser.add_argument('in', type=bounded_str, location='args')
g.parser.add_argument('thread_id', type=valid_public_id, location='args')
g.parser.add_argument('unread', type=strict_bool, location='args')
g.parser.add_argument('starred', type=strict_bool, location='args')
g.parser.add_argument('view', type=view, location='args')
args = strict_parse_args(g.parser, request.args)
drafts = filtering.messages_or_drafts(
namespace_id=g.namespace.id,
drafts=True,
subject=args['subject'],
thread_public_id=args['thread_id'],
to_addr=args['to'],
from_addr=None,
cc_addr=args['cc'],
bcc_addr=args['bcc'],
any_email=args['any_email'],
started_before=args['started_before'],
started_after=args['started_after'],
last_message_before=args['last_message_before'],
last_message_after=args['last_message_after'],
received_before=args['received_before'],
received_after=args['received_after'],
filename=args['filename'],
in_=args['in'],
unread=args['unread'],
starred=args['starred'],
limit=args['limit'],
offset=args['offset'],
view=args['view'],
db_session=g.db_session)
return g.encoder.jsonify(drafts)
@app.route('/drafts/<public_id>', methods=['GET'])
def draft_get_api(public_id):
valid_public_id(public_id)
draft = g.db_session.query(Message).filter(
Message.public_id == public_id,
Message.namespace_id == g.namespace.id).first()
if draft is None:
raise NotFoundError("Couldn't find draft {}".format(public_id))
return g.encoder.jsonify(draft)
@app.route('/drafts/', methods=['POST'])
def draft_create_api():
data = request.get_json(force=True)
draft = create_message_from_json(data, g.namespace, g.db_session,
is_draft=True)
return g.encoder.jsonify(draft)
@app.route('/drafts/<public_id>', methods=['PUT', 'PATCH'])
def draft_update_api(public_id):
data = request.get_json(force=True)
original_draft = get_draft(public_id, data.get('version'), g.namespace.id,
g.db_session)
# TODO(emfree): what if you try to update a draft on a *thread* that's been
# deleted?
data = request.get_json(force=True)
to = get_recipients(data.get('to'), 'to')
cc = get_recipients(data.get('cc'), 'cc')
bcc = get_recipients(data.get('bcc'), 'bcc')
from_addr = get_recipients(data.get('from_addr'), 'from_addr')
reply_to = get_recipients(data.get('reply_to'), 'reply_to')
if from_addr and len(from_addr) > 1:
raise InputError("from_addr field can have at most one item")
if reply_to and len(reply_to) > 1:
raise InputError("reply_to field can have at most one item")
subject = data.get('subject')
body = data.get('body')
files = get_attachments(data.get('file_ids'), g.namespace.id, g.db_session)
draft = update_draft(g.db_session, g.namespace.account, original_draft,
to, subject, body, files, cc, bcc, from_addr,
reply_to)
return g.encoder.jsonify(draft)
@app.route('/drafts/<public_id>', methods=['DELETE'])
def draft_delete_api(public_id):
data = request.get_json(force=True)
# Validate draft id, version, etc.
draft = get_draft(public_id, data.get('version'), g.namespace.id,
g.db_session)
result = delete_draft(g.db_session, g.namespace.account, draft)
return g.encoder.jsonify(result)
@app.route('/send', methods=['POST'])
@app.route('/send-with-features', methods=['POST']) # TODO deprecate this URL
def draft_send_api():
request_started = time.time()
account = g.namespace.account
if request.content_type == "message/rfc822":
draft = create_draft_from_mime(account, request.data,
g.db_session)
validate_draft_recipients(draft)
if isinstance(account, GenericAccount):
schedule_action('save_sent_email', draft, draft.namespace.id,
g.db_session)
resp = send_raw_mime(account, g.db_session, draft)
return resp
data = request.get_json(force=True)
# Check if using tracking
tracking_options = data.get('tracking', {})
draft_public_id = data.get('draft_id')
if draft_public_id is not None:
draft = get_draft(draft_public_id, data.get('version'),
g.namespace.id, g.db_session)
else:
draft = create_message_from_json(data, g.namespace,
g.db_session, is_draft=False)
validate_draft_recipients(draft)
if tracking_options: # Open/Link/Reply tracking set
try:
from redwood.api.tracking import handle_tracking_options
except ImportError:
return err(501,
'Tracking is not implemented in the open source '
'Nylas Cloud API. See our hosted version for this '
'feature. https://nylas.com/cloud')
assert hasattr(g, 'application_id'), \
'Tracking requires application ID'
handle_tracking_options(
mailsync_db_session=g.db_session,
tracking_options=tracking_options,
draft=draft,
application_id=g.application_id)
if isinstance(account, GenericAccount):
schedule_action('save_sent_email', draft, draft.namespace.id,
g.db_session)
if time.time() - request_started > SEND_TIMEOUT:
# Preemptively time out the request if we got stuck doing database work
# -- we don't want clients to disconnect and then still send the
# message.
return err(504, 'Request timed out.')
resp = send_draft(account, draft, g.db_session)
# Only delete the draft once we know it has been sent
if draft_public_id is not None and resp.status_code == 200:
schedule_action('delete_draft', draft, draft.namespace.id,
g.db_session, nylas_uid=draft.nylas_uid,
message_id_header=draft.message_id_header)
return resp
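# Illustrative /send payloads (the field names are the ones read by the
# handler above; ids and versions are placeholders):
#   POST /send   {"draft_id": "<draft public id>", "version": 0}
#       sends an existing draft and then schedules 'delete_draft'
#   POST /send   {<new message JSON, as accepted by create_message_from_json>}
#       creates and sends the message in one call
# A raw message can also be POSTed with Content-Type: message/rfc822.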
@app.route('/send-multiple', methods=['POST'])
def multi_send_create():
"""Initiates a multi-send session by creating a new multi-send draft."""
account = g.namespace.account
if account.discriminator == 'easaccount':
raise InputError('Multiple send is not supported for this provider.')
data = request.get_json(force=True)
# Make a new draft and don't save it to the remote (by passing
# is_draft=False)
draft = create_message_from_json(data, g.namespace,
g.db_session, is_draft=False)
validate_draft_recipients(draft)
# Mark the draft as sending, which ensures that it cannot be modified.
draft.mark_as_sending()
g.db_session.add(draft)
request.environ['log_context']['draft_public_id'] = draft.public_id
return g.encoder.jsonify(draft)
@app.route('/send-multiple/<draft_id>', methods=['POST'])
def multi_send(draft_id):
"""Performs a single send operation in an individualized multi-send
session. Sends a copy of the draft at draft_id to the specified address
with the specified body, and ensures that a corresponding sent message is
either not created in the user's Sent folder or is immediately
deleted from it."""
request_started = time.time()
account = g.namespace.account
if account.discriminator == 'easaccount':
raise InputError('Multiple send is not supported for this provider.')
data = request.get_json(force=True)
valid_public_id(draft_id)
body = data.get('body')
send_to = get_recipients([data.get('send_to')], 'to')[0]
draft = get_sending_draft(draft_id, g.namespace.id, g.db_session)
if not draft.is_sending:
raise InputError('Invalid draft, not part of a multi-send transaction')
emails = {email for name, email in itertools.chain(draft.to_addr,
draft.cc_addr,
draft.bcc_addr)}
if send_to[1] not in emails:
raise InputError('Invalid send_to, not present in message recipients')
if time.time() - request_started > SEND_TIMEOUT:
# Preemptively time out the request if we got stuck doing database work
# -- we don't want clients to disconnect and then still send the
# message.
return err(504, 'Request timed out.')
start_time = time.time()
# Send a copy of the draft with the new body to the send_to address
resp = send_draft_copy(account, draft, body, send_to)
request.environ['log_context']["time_to_send"] = time.time() - start_time
return resp
@app.route('/send-multiple/<draft_id>', methods=['DELETE'])
def multi_send_finish(draft_id):
"""Closes out a multi-send session by marking the sending draft as sent
and moving it to the user's Sent folder."""
account = g.namespace.account
if account.discriminator == 'easaccount':
raise InputError('Multiple send is not supported for this provider.')
valid_public_id(draft_id)
draft = get_sending_draft(draft_id, g.namespace.id, g.db_session)
if not draft.is_sending:
raise InputError('Invalid draft, not part of a multi-send transaction')
# Synchronously delete any matching messages from the sent folder, left
# over from the send calls (in gmail only)
if not isinstance(account, GenericAccount):
try:
with writable_connection_pool(account.id).get() as crispin_client:
remote_delete_sent(crispin_client, account.id,
draft.message_id_header,
delete_multiple=True)
except Exception:
# Even if this fails, we need to finish off the multi-send session
log_exception(sys.exc_info(), draft_public_id=draft.public_id)
# Mark the draft as sent in our database
update_draft_on_send(account, draft, g.db_session)
# Save the sent message with its existing body to the user's sent folder
schedule_action('save_sent_email', draft, draft.namespace.id, g.db_session)
return g.encoder.jsonify(draft)
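# Illustrative multi-send flow (endpoints as defined above; the recipient
# object shape is an assumption, ids and addresses are placeholders):
#   1. POST   /send-multiple                -> creates the sending draft
#   2. POST   /send-multiple/<draft_id>     {"body": "<personalized body>",
#                                            "send_to": {"email": "a@example.com"}}
#      repeated once per recipient; send_to must already be among the draft's
#      to/cc/bcc addresses or the call fails with InputError
#   3. DELETE /send-multiple/<draft_id>     -> marks the draft as sent and
#      saves it to the user's Sent folder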
##
# Client syncing
##
@app.route('/delta')
@app.route('/delta/longpoll')
def sync_deltas():
g.parser.add_argument('cursor', type=valid_public_id, location='args',
required=True)
g.parser.add_argument('exclude_types', type=valid_delta_object_types,
location='args')
g.parser.add_argument('include_types', type=valid_delta_object_types,
location='args')
g.parser.add_argument('timeout', type=int,
default=LONG_POLL_REQUEST_TIMEOUT, location='args')
g.parser.add_argument('view', type=view, location='args')
# - Begin shim -
    # Remove after folders and labels are exposed in the Delta API for
    # everybody; right now, only expose them for Edgehill.
# Same for the account object.
g.parser.add_argument('exclude_folders', type=strict_bool, location='args')
g.parser.add_argument('exclude_account', type=strict_bool, location='args',
default=True)
# - End shim -
# Metadata has restricted access - only N1 can make a request with this
# arg included. For everyone else, set exclude_metadata to True by default.
g.parser.add_argument('exclude_metadata', type=strict_bool,
location='args', default=True)
args = strict_parse_args(g.parser, request.args)
| exclude_types = args.get('exclude_types') | 4,956 | lcc_e | python | null | ccfda5c5b2193dd53acce9410d3fbb305d423d4917c636b1 |
|
"""
Models for User Information (students, staff, etc)
Migration Notes
If you make changes to this model, be sure to create an appropriate migration
file and check it in at the same time as your model changes. To do that,
1. Go to the edx-platform dir
2. ./manage.py lms schemamigration student --auto description_of_your_change
3. Add the migration file created in edx-platform/common/djangoapps/student/migrations/
"""
import hashlib
import json
import logging
import six
import uuid
from collections import OrderedDict, defaultdict, namedtuple
from datetime import datetime, timedelta
from functools import total_ordering
from importlib import import_module
from urllib import urlencode
import analytics
from config_models.models import ConfigurationModel
from django.apps import apps
from django.conf import settings
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import User
from django.contrib.auth.signals import user_logged_in, user_logged_out
from django.core.cache import cache
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.db import IntegrityError, models, transaction
from django.db.models import Count, Q
from django.db.models.signals import post_save, pre_save
from django.db.utils import ProgrammingError
from django.dispatch import receiver
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext_noop
from django_countries.fields import CountryField
from edx_rest_api_client.exceptions import SlumberBaseException
from eventtracking import tracker
from model_utils.models import TimeStampedModel
from opaque_keys.edx.django.models import CourseKeyField
from opaque_keys.edx.keys import CourseKey
from pytz import UTC
from six import text_type
from slumber.exceptions import HttpClientError, HttpServerError
from user_util import user_util
import lms.lib.comment_client as cc
from student.signals import UNENROLL_DONE, ENROLL_STATUS_CHANGE, ENROLLMENT_TRACK_UPDATED
from lms.djangoapps.certificates.models import GeneratedCertificate
from course_modes.models import CourseMode
from courseware.models import (
CourseDynamicUpgradeDeadlineConfiguration,
DynamicUpgradeDeadlineConfiguration,
OrgDynamicUpgradeDeadlineConfiguration
)
from enrollment.api import _default_course_mode
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.djangoapps.request_cache import clear_cache, get_cache
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.xmodule_django.models import NoneToEmptyManager
from openedx.core.djangolib.model_mixins import DeletableByUserValue
from track import contexts
from util.milestones_helpers import is_entrance_exams_enabled
from util.model_utils import emit_field_changed_events, get_changed_fields_dict
from util.query import use_read_replica_if_available
log = logging.getLogger(__name__)
AUDIT_LOG = logging.getLogger("audit")
SessionStore = import_module(settings.SESSION_ENGINE).SessionStore # pylint: disable=invalid-name
# enroll status changed events - signaled to email_marketing. See email_marketing.tasks for more info
# ENROLL signal used for free enrollment only
class EnrollStatusChange(object):
"""
Possible event types for ENROLL_STATUS_CHANGE signal
"""
# enroll for a course
enroll = 'enroll'
# unenroll for a course
unenroll = 'unenroll'
# add an upgrade to cart
upgrade_start = 'upgrade_start'
# complete an upgrade purchase
upgrade_complete = 'upgrade_complete'
# add a paid course to the cart
paid_start = 'paid_start'
# complete a paid course purchase
paid_complete = 'paid_complete'
UNENROLLED_TO_ALLOWEDTOENROLL = 'from unenrolled to allowed to enroll'
ALLOWEDTOENROLL_TO_ENROLLED = 'from allowed to enroll to enrolled'
ENROLLED_TO_ENROLLED = 'from enrolled to enrolled'
ENROLLED_TO_UNENROLLED = 'from enrolled to unenrolled'
UNENROLLED_TO_ENROLLED = 'from unenrolled to enrolled'
ALLOWEDTOENROLL_TO_UNENROLLED = 'from allowed to enroll to unenrolled'
UNENROLLED_TO_UNENROLLED = 'from unenrolled to unenrolled'
DEFAULT_TRANSITION_STATE = 'N/A'
SCORE_RECALCULATION_DELAY_ON_ENROLLMENT_UPDATE = 30
TRANSITION_STATES = (
(UNENROLLED_TO_ALLOWEDTOENROLL, UNENROLLED_TO_ALLOWEDTOENROLL),
(ALLOWEDTOENROLL_TO_ENROLLED, ALLOWEDTOENROLL_TO_ENROLLED),
(ENROLLED_TO_ENROLLED, ENROLLED_TO_ENROLLED),
(ENROLLED_TO_UNENROLLED, ENROLLED_TO_UNENROLLED),
(UNENROLLED_TO_ENROLLED, UNENROLLED_TO_ENROLLED),
(ALLOWEDTOENROLL_TO_UNENROLLED, ALLOWEDTOENROLL_TO_UNENROLLED),
(UNENROLLED_TO_UNENROLLED, UNENROLLED_TO_UNENROLLED),
(DEFAULT_TRANSITION_STATE, DEFAULT_TRANSITION_STATE)
)
class AnonymousUserId(models.Model):
"""
    This table contains user, course_id and anonymous_user_id.
    Its purpose is to let us look up a user by anonymous_user_id.
    We generate anonymous_user_id using the md5 algorithm,
    and use the result in hex form, so its length is 32 characters.
"""
objects = NoneToEmptyManager()
user = models.ForeignKey(User, db_index=True, on_delete=models.CASCADE)
anonymous_user_id = models.CharField(unique=True, max_length=32)
course_id = CourseKeyField(db_index=True, max_length=255, blank=True)
def anonymous_id_for_user(user, course_id, save=True):
"""
Return a unique id for a (user, course) pair, suitable for inserting
into e.g. personalized survey links.
If user is an `AnonymousUser`, returns `None`
Keyword arguments:
save -- Whether the id should be saved in an AnonymousUserId object.
"""
# This part is for ability to get xblock instance in xblock_noauth handlers, where user is unauthenticated.
assert user
if user.is_anonymous:
return None
cached_id = getattr(user, '_anonymous_id', {}).get(course_id)
if cached_id is not None:
return cached_id
# include the secret key as a salt, and to make the ids unique across different LMS installs.
hasher = hashlib.md5()
hasher.update(settings.SECRET_KEY)
hasher.update(text_type(user.id))
if course_id:
hasher.update(text_type(course_id).encode('utf-8'))
digest = hasher.hexdigest()
if not hasattr(user, '_anonymous_id'):
user._anonymous_id = {} # pylint: disable=protected-access
user._anonymous_id[course_id] = digest # pylint: disable=protected-access
if save is False:
return digest
try:
with transaction.atomic():
AnonymousUserId.objects.get_or_create(
user=user,
course_id=course_id,
anonymous_user_id=digest,
)
except IntegrityError:
# Another thread has already created this entry, so
# continue
pass
return digest
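# Usage sketch (illustrative): the digest is stable for a given
# (SECRET_KEY, user, course) triple, so repeated calls return the same id and
# the get_or_create above never produces duplicate rows.
#   anon_id = anonymous_id_for_user(user, course_key)              # persisted
#   preview = anonymous_id_for_user(user, course_key, save=False)  # not persisted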
def user_by_anonymous_id(uid):
"""
Return user by anonymous_user_id using AnonymousUserId lookup table.
Do not raise `django.ObjectDoesNotExist` exception,
if there is no user for anonymous_student_id,
because this function will be used inside xmodule w/o django access.
"""
if uid is None:
return None
try:
return User.objects.get(anonymoususerid__anonymous_user_id=uid)
except ObjectDoesNotExist:
return None
def is_username_retired(username):
"""
Checks to see if the given username has been previously retired
"""
locally_hashed_usernames = user_util.get_all_retired_usernames(
username,
settings.RETIRED_USER_SALTS,
settings.RETIRED_USERNAME_FMT
)
# TODO: Revert to this after username capitalization issues detailed in
# PLAT-2276, PLAT-2277, PLAT-2278 are sorted out:
# return User.objects.filter(username__in=list(locally_hashed_usernames)).exists()
# Avoid circular import issues
from openedx.core.djangoapps.user_api.models import UserRetirementStatus
# Sandbox clean builds attempt to create users during migrations, before the database
    # is stable, so UserRetirementStatus may not exist yet. This workaround can
    # be removed when we are done with the username updates.
try:
return User.objects.filter(username__in=list(locally_hashed_usernames)).exists() or \
UserRetirementStatus.objects.filter(original_username=username).exists()
except ProgrammingError as exc:
# Check the error message to make sure it's what we expect
if "user_api_userretirementstatus" in text_type(exc):
return User.objects.filter(username__in=list(locally_hashed_usernames)).exists()
raise
def is_email_retired(email):
"""
Checks to see if the given email has been previously retired
"""
locally_hashed_emails = user_util.get_all_retired_emails(
email,
settings.RETIRED_USER_SALTS,
settings.RETIRED_EMAIL_FMT
)
return User.objects.filter(email__in=list(locally_hashed_emails)).exists()
def email_exists_or_retired(email):
"""
Check an email against the User model for existence.
"""
return User.objects.filter(email=email).exists() or is_email_retired(email)
def get_retired_username_by_username(username):
"""
If a UserRetirementStatus object with an original_username matching the given username exists,
returns that UserRetirementStatus.retired_username value. Otherwise, returns a "retired username"
hashed using the newest configured salt.
"""
UserRetirementStatus = apps.get_model('user_api', 'UserRetirementStatus')
try:
status = UserRetirementStatus.objects.filter(original_username=username).order_by('-modified').first()
if status:
return status.retired_username
except UserRetirementStatus.DoesNotExist:
pass
return user_util.get_retired_username(username, settings.RETIRED_USER_SALTS, settings.RETIRED_USERNAME_FMT)
def get_retired_email_by_email(email):
"""
If a UserRetirementStatus object with an original_email matching the given email exists,
returns that UserRetirementStatus.retired_email value. Otherwise, returns a "retired email"
hashed using the newest configured salt.
"""
UserRetirementStatus = apps.get_model('user_api', 'UserRetirementStatus')
try:
status = UserRetirementStatus.objects.filter(original_email=email).order_by('-modified').first()
if status:
return status.retired_email
except UserRetirementStatus.DoesNotExist:
pass
return user_util.get_retired_email(email, settings.RETIRED_USER_SALTS, settings.RETIRED_EMAIL_FMT)
def get_all_retired_usernames_by_username(username):
"""
Returns a generator of "retired usernames", one hashed with each
configured salt. Used for finding out if the given username has
ever been used and retired.
"""
return user_util.get_all_retired_usernames(username, settings.RETIRED_USER_SALTS, settings.RETIRED_USERNAME_FMT)
def get_all_retired_emails_by_email(email):
"""
Returns a generator of "retired emails", one hashed with each
configured salt. Used for finding out if the given email has
ever been used and retired.
"""
return user_util.get_all_retired_emails(email, settings.RETIRED_USER_SALTS, settings.RETIRED_EMAIL_FMT)
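# Usage sketch: both helpers yield one hashed value per salt configured in
# settings.RETIRED_USER_SALTS, so membership checks have to compare against the
# whole list, e.g.
#   hashed = list(get_all_retired_usernames_by_username('ben'))
#   User.objects.filter(username__in=hashed).exists()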
def get_potentially_retired_user_by_username(username):
"""
Attempt to return a User object based on the username, or if it
does not exist, then any hashed username salted with the historical
salts.
"""
locally_hashed_usernames = list(get_all_retired_usernames_by_username(username))
locally_hashed_usernames.append(username)
potential_users = User.objects.filter(username__in=locally_hashed_usernames)
# Have to disambiguate between several Users here as we could have retirees with
    # the same username, differing only by case.
# If there's only 1 we're done, this should be the common case
if len(potential_users) == 1:
return potential_users[0]
# No user found, throw the usual error
if not potential_users:
raise User.DoesNotExist()
# If there are 2, one of two things should be true:
# - The user we want is un-retired and has the same case-match username
    # - Or the retired one was the case match
if len(potential_users) == 2:
return potential_users[0] if potential_users[0].username == username else potential_users[1]
# We should have, at most, a retired username and an active one with a username
# differing only by case. If there are more we need to disambiguate them by hand.
raise Exception('Expected 1 or 2 Users, received {}'.format(text_type(potential_users)))
def get_potentially_retired_user_by_username_and_hash(username, hashed_username):
"""
To assist in the retirement process this method will:
- Confirm that any locally hashed username matches the passed in one
(in case of salt mismatches with the upstream script).
- Attempt to return a User object based on the username, or if it
      does not exist, then any hashed username salted with the historical
salts.
"""
locally_hashed_usernames = list(get_all_retired_usernames_by_username(username))
if hashed_username not in locally_hashed_usernames:
raise Exception('Mismatched hashed_username, bad salt?')
locally_hashed_usernames.append(username)
return User.objects.get(username__in=locally_hashed_usernames)
class UserStanding(models.Model):
"""
This table contains a student's account's status.
Currently, we're only disabling accounts; in the future we can imagine
taking away more specific privileges, like forums access, or adding
more specific karma levels or probationary stages.
"""
ACCOUNT_DISABLED = "disabled"
ACCOUNT_ENABLED = "enabled"
USER_STANDING_CHOICES = (
(ACCOUNT_DISABLED, u"Account Disabled"),
(ACCOUNT_ENABLED, u"Account Enabled"),
)
user = models.OneToOneField(User, db_index=True, related_name='standing', on_delete=models.CASCADE)
account_status = models.CharField(
blank=True, max_length=31, choices=USER_STANDING_CHOICES
)
changed_by = models.ForeignKey(User, blank=True, on_delete=models.CASCADE)
standing_last_changed_at = models.DateTimeField(auto_now=True)
class UserProfile(models.Model):
"""This is where we store all the user demographic fields. We have a
separate table for this rather than extending the built-in Django auth_user.
Notes:
* Some fields are legacy ones from the first run of 6.002, from which
we imported many users.
* Fields like name and address are intentionally open ended, to account
for international variations. An unfortunate side-effect is that we
cannot efficiently sort on last names for instance.
Replication:
* Only the Portal servers should ever modify this information.
* All fields are replicated into relevant Course databases
Some of the fields are legacy ones that were captured during the initial
MITx fall prototype.
"""
# cache key format e.g user.<user_id>.profile.country = 'SG'
PROFILE_COUNTRY_CACHE_KEY = u"user.{user_id}.profile.country"
class Meta(object):
db_table = "auth_userprofile"
permissions = (("can_deactivate_users", "Can deactivate, but NOT delete users"),)
# CRITICAL TODO/SECURITY
# Sanitize all fields.
# This is not visible to other users, but could introduce holes later
user = models.OneToOneField(User, unique=True, db_index=True, related_name='profile', on_delete=models.CASCADE)
name = models.CharField(blank=True, max_length=255, db_index=True)
meta = models.TextField(blank=True) # JSON dictionary for future expansion
courseware = models.CharField(blank=True, max_length=255, default='course.xml')
# Location is no longer used, but is held here for backwards compatibility
# for users imported from our first class.
language = models.CharField(blank=True, max_length=255, db_index=True)
location = models.CharField(blank=True, max_length=255, db_index=True)
# Optional demographic data we started capturing from Fall 2012
this_year = datetime.now(UTC).year
VALID_YEARS = range(this_year, this_year - 120, -1)
year_of_birth = models.IntegerField(blank=True, null=True, db_index=True)
GENDER_CHOICES = (
('m', ugettext_noop('Male')),
('f', ugettext_noop('Female')),
# Translators: 'Other' refers to the student's gender
('o', ugettext_noop('Other/Prefer Not to Say'))
)
gender = models.CharField(
blank=True, null=True, max_length=6, db_index=True, choices=GENDER_CHOICES
)
# [03/21/2013] removed these, but leaving comment since there'll still be
# p_se and p_oth in the existing data in db.
# ('p_se', 'Doctorate in science or engineering'),
# ('p_oth', 'Doctorate in another field'),
LEVEL_OF_EDUCATION_CHOICES = (
('p', ugettext_noop('Doctorate')),
('m', ugettext_noop("Master's or professional degree")),
('b', ugettext_noop("Bachelor's degree")),
('a', ugettext_noop("Associate degree")),
('hs', ugettext_noop("Secondary/high school")),
('jhs', ugettext_noop("Junior secondary/junior high/middle school")),
('el', ugettext_noop("Elementary/primary school")),
# Translators: 'None' refers to the student's level of education
('none', ugettext_noop("No formal education")),
# Translators: 'Other' refers to the student's level of education
('other', ugettext_noop("Other education"))
)
level_of_education = models.CharField(
blank=True, null=True, max_length=6, db_index=True,
choices=LEVEL_OF_EDUCATION_CHOICES
)
mailing_address = models.TextField(blank=True, null=True)
city = models.TextField(blank=True, null=True)
country = CountryField(blank=True, null=True)
goals = models.TextField(blank=True, null=True)
allow_certificate = models.BooleanField(default=1)
bio = models.CharField(blank=True, null=True, max_length=3000, db_index=False)
profile_image_uploaded_at = models.DateTimeField(null=True, blank=True)
@property
def has_profile_image(self):
"""
Convenience method that returns a boolean indicating whether or not
this user has uploaded a profile image.
"""
return self.profile_image_uploaded_at is not None
@property
def age(self):
""" Convenience method that returns the age given a year_of_birth. """
year_of_birth = self.year_of_birth
year = datetime.now(UTC).year
if year_of_birth is not None:
return self._calculate_age(year, year_of_birth)
@property
def level_of_education_display(self):
""" Convenience method that returns the human readable level of education. """
if self.level_of_education:
return self.__enumerable_to_display(self.LEVEL_OF_EDUCATION_CHOICES, self.level_of_education)
@property
def gender_display(self):
""" Convenience method that returns the human readable gender. """
if self.gender:
return self.__enumerable_to_display(self.GENDER_CHOICES, self.gender)
def get_meta(self): # pylint: disable=missing-docstring
js_str = self.meta
if not js_str:
js_str = dict()
else:
js_str = json.loads(self.meta)
return js_str
def set_meta(self, meta_json): # pylint: disable=missing-docstring
self.meta = json.dumps(meta_json)
def set_login_session(self, session_id=None):
"""
Sets the current session id for the logged-in user.
If session_id doesn't match the existing session,
deletes the old session object.
"""
meta = self.get_meta()
old_login = meta.get('session_id', None)
if old_login:
SessionStore(session_key=old_login).delete()
meta['session_id'] = session_id
self.set_meta(meta)
self.save()
def requires_parental_consent(self, date=None, age_limit=None, default_requires_consent=True):
"""Returns true if this user requires parental consent.
Args:
date (Date): The date for which consent needs to be tested (defaults to now).
age_limit (int): The age limit at which parental consent is no longer required.
                This defaults to the value of the setting 'PARENTAL_CONSENT_AGE_LIMIT'.
default_requires_consent (bool): True if users require parental consent if they
have no specified year of birth (default is True).
Returns:
True if the user requires parental consent.
"""
if age_limit is None:
age_limit = getattr(settings, 'PARENTAL_CONSENT_AGE_LIMIT', None)
if age_limit is None:
return False
# Return True if either:
# a) The user has a year of birth specified and that year is fewer years in the past than the limit.
# b) The user has no year of birth specified and the default is to require consent.
#
# Note: we have to be conservative using the user's year of birth as their birth date could be
# December 31st. This means that if the number of years since their birth year is exactly equal
# to the age limit then we have to assume that they might still not be old enough.
year_of_birth = self.year_of_birth
if year_of_birth is None:
return default_requires_consent
if date is None:
age = self.age
else:
age = self._calculate_age(date.year, year_of_birth)
return age < age_limit
def __enumerable_to_display(self, enumerables, enum_value):
""" Get the human readable value from an enumerable list of key-value pairs. """
return dict(enumerables)[enum_value]
def _calculate_age(self, year, year_of_birth):
"""Calculate the youngest age for a user with a given year of birth.
:param year: year
:param year_of_birth: year of birth
:return: youngest age a user could be for the given year
"""
# There are legal implications regarding how we can contact users and what information we can make public
# based on their age, so we must take the most conservative estimate.
return year - year_of_birth - 1
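    # Worked example of the conservative rule above: a user born in 2005 is
    # treated as 2018 - 2005 - 1 = 12 for all of 2018, because their birthday
    # may fall on December 31st; requires_parental_consent() therefore errs on
    # the side of treating them as under the age limit.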
@classmethod
def country_cache_key_name(cls, user_id):
"""Return cache key name to be used to cache current country.
Args:
user_id(int): Id of user.
Returns:
Unicode cache key
"""
return cls.PROFILE_COUNTRY_CACHE_KEY.format(user_id=user_id)
@receiver(models.signals.post_save, sender=UserProfile)
def invalidate_user_profile_country_cache(sender, instance, **kwargs): # pylint: disable=unused-argument, invalid-name
"""Invalidate the cache of country in UserProfile model. """
changed_fields = getattr(instance, '_changed_fields', {})
if 'country' in changed_fields:
cache_key = UserProfile.country_cache_key_name(instance.user_id)
cache.delete(cache_key)
log.info("Country changed in UserProfile for %s, cache deleted", instance.user_id)
@receiver(pre_save, sender=UserProfile)
def user_profile_pre_save_callback(sender, **kwargs):
"""
Ensure consistency of a user profile before saving it.
"""
user_profile = kwargs['instance']
# Remove profile images for users who require parental consent
if user_profile.requires_parental_consent() and user_profile.has_profile_image:
user_profile.profile_image_uploaded_at = None
# Cache "old" field values on the model instance so that they can be
# retrieved in the post_save callback when we emit an event with new and
# old field values.
user_profile._changed_fields = get_changed_fields_dict(user_profile, sender)
@receiver(post_save, sender=UserProfile)
def user_profile_post_save_callback(sender, **kwargs):
"""
Emit analytics events after saving the UserProfile.
"""
user_profile = kwargs['instance']
# pylint: disable=protected-access
emit_field_changed_events(
user_profile,
user_profile.user,
sender._meta.db_table,
excluded_fields=['meta']
)
@receiver(pre_save, sender=User)
def user_pre_save_callback(sender, **kwargs):
"""
Capture old fields on the user instance before save and cache them as a
private field on the current model for use in the post_save callback.
"""
user = kwargs['instance']
user._changed_fields = get_changed_fields_dict(user, sender)
@receiver(post_save, sender=User)
def user_post_save_callback(sender, **kwargs):
"""
When a user is modified and either its `is_active` state or email address
is changed, and the user is, in fact, active, then check to see if there
are any courses that it needs to be automatically enrolled in.
Additionally, emit analytics events after saving the User.
"""
user = kwargs['instance']
changed_fields = user._changed_fields
if 'is_active' in changed_fields or 'email' in changed_fields:
if user.is_active:
ceas = CourseEnrollmentAllowed.for_user(user).filter(auto_enroll=True)
for cea in ceas:
enrollment = CourseEnrollment.enroll(user, cea.course_id)
manual_enrollment_audit = ManualEnrollmentAudit.get_manual_enrollment_by_email(user.email)
if manual_enrollment_audit is not None:
# get the enrolled by user and reason from the ManualEnrollmentAudit table.
# then create a new ManualEnrollmentAudit table entry for the same email
# different transition state.
ManualEnrollmentAudit.create_manual_enrollment_audit(
manual_enrollment_audit.enrolled_by,
user.email,
ALLOWEDTOENROLL_TO_ENROLLED,
manual_enrollment_audit.reason,
enrollment
)
# Because `emit_field_changed_events` removes the record of the fields that
# were changed, wait to do that until after we've checked them as part of
# the condition on whether we want to check for automatic enrollments.
# pylint: disable=protected-access
emit_field_changed_events(
user,
user,
sender._meta.db_table,
excluded_fields=['last_login', 'first_name', 'last_name'],
hidden_fields=['password']
)
class UserSignupSource(models.Model):
"""
This table contains information about users registering
via Micro-Sites
"""
user = models.ForeignKey(User, db_index=True, on_delete=models.CASCADE)
site = models.CharField(max_length=255, db_index=True)
def unique_id_for_user(user, save=True):
"""
Return a unique id for a user, suitable for inserting into
e.g. personalized survey links.
Keyword arguments:
save -- Whether the id should be saved in an AnonymousUserId object.
"""
# Setting course_id to '' makes it not affect the generated hash,
# and thus produce the old per-student anonymous id
return anonymous_id_for_user(user, None, save=save)
# TODO: Should be renamed to generic UserGroup, and possibly
# Given an optional field for type of group
class UserTestGroup(models.Model):
users = models.ManyToManyField(User, db_index=True)
name = models.CharField(blank=False, max_length=32, db_index=True)
description = models.TextField(blank=True)
class Registration(models.Model):
''' Allows us to wait for e-mail before user is registered. A
registration profile is created when the user creates an
account, but that account is inactive. Once the user clicks
on the activation key, it becomes active. '''
class Meta(object):
db_table = "auth_registration"
user = models.OneToOneField(User, on_delete=models.CASCADE)
activation_key = models.CharField(('activation key'), max_length=32, unique=True, db_index=True)
def register(self, user):
# MINOR TODO: Switch to crypto-secure key
self.activation_key = uuid.uuid4().hex
self.user = user
self.save()
def activate(self):
self.user.is_active = True
self._track_activation()
self.user.save()
log.info(u'User %s (%s) account is successfully activated.', self.user.username, self.user.email)
def _track_activation(self):
""" Update the isActive flag in mailchimp for activated users."""
has_segment_key = getattr(settings, 'LMS_SEGMENT_KEY', None)
has_mailchimp_id = hasattr(settings, 'MAILCHIMP_NEW_USER_LIST_ID')
if has_segment_key and has_mailchimp_id:
identity_args = [
self.user.id, # pylint: disable=no-member
{
'email': self.user.email,
'username': self.user.username,
'activated': 1,
},
{
"MailChimp": {
"listId": settings.MAILCHIMP_NEW_USER_LIST_ID
}
}
]
analytics.identify(*identity_args)
class PendingNameChange(DeletableByUserValue, models.Model):
user = models.OneToOneField(User, unique=True, db_index=True, on_delete=models.CASCADE)
new_name = models.CharField(blank=True, max_length=255)
rationale = models.CharField(blank=True, max_length=1024)
class PendingEmailChange(DeletableByUserValue, models.Model):
"""
This model keeps track of pending requested changes to a user's email address.
"""
user = models.OneToOneField(User, unique=True, db_index=True, on_delete=models.CASCADE)
new_email = models.CharField(blank=True, max_length=255, db_index=True)
activation_key = models.CharField(('activation key'), max_length=32, unique=True, db_index=True)
def request_change(self, email):
"""Request a change to a user's email.
Implicitly saves the pending email change record.
Arguments:
email (unicode): The proposed new email for the user.
Returns:
unicode: The activation code to confirm the change.
"""
self.new_email = email
self.activation_key = uuid.uuid4().hex
self.save()
return self.activation_key
EVENT_NAME_ENROLLMENT_ACTIVATED = 'edx.course.enrollment.activated'
EVENT_NAME_ENROLLMENT_DEACTIVATED = 'edx.course.enrollment.deactivated'
EVENT_NAME_ENROLLMENT_MODE_CHANGED = 'edx.course.enrollment.mode_changed'
class PasswordHistory(models.Model):
"""
This model will keep track of past passwords that a user has used
    as well as providing constraints (e.g. can't reuse passwords)
"""
user = models.ForeignKey(User, on_delete=models.CASCADE)
password = models.CharField(max_length=128)
time_set = models.DateTimeField(default=timezone.now)
def create(self, user):
"""
This will copy over the current password, if any of the configuration has been turned on
"""
if not (PasswordHistory.is_student_password_reuse_restricted() or
PasswordHistory.is_staff_password_reuse_restricted() or
PasswordHistory.is_password_reset_frequency_restricted() or
PasswordHistory.is_staff_forced_password_reset_enabled() or
PasswordHistory.is_student_forced_password_reset_enabled()):
return
self.user = user
self.password = user.password
self.save()
@classmethod
def is_student_password_reuse_restricted(cls):
"""
Returns whether the configuration which limits password reuse has been turned on
"""
if not settings.FEATURES['ADVANCED_SECURITY']:
return False
min_diff_pw = settings.ADVANCED_SECURITY_CONFIG.get(
'MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE', 0
)
return min_diff_pw > 0
@classmethod
def is_staff_password_reuse_restricted(cls):
"""
Returns whether the configuration which limits password reuse has been turned on
"""
if not settings.FEATURES['ADVANCED_SECURITY']:
return False
min_diff_pw = settings.ADVANCED_SECURITY_CONFIG.get(
'MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE', 0
)
return min_diff_pw > 0
@classmethod
def is_password_reset_frequency_restricted(cls):
"""
Returns whether the configuration which limits the password reset frequency has been turned on
"""
if not settings.FEATURES['ADVANCED_SECURITY']:
return False
min_days_between_reset = settings.ADVANCED_SECURITY_CONFIG.get(
'MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS'
)
return min_days_between_reset
@classmethod
def is_staff_forced_password_reset_enabled(cls):
"""
Returns whether the configuration which forces password resets to occur has been turned on
"""
if not settings.FEATURES['ADVANCED_SECURITY']:
return False
min_days_between_reset = settings.ADVANCED_SECURITY_CONFIG.get(
'MIN_DAYS_FOR_STAFF_ACCOUNTS_PASSWORD_RESETS'
)
return min_days_between_reset
@classmethod
def is_student_forced_password_reset_enabled(cls):
"""
Returns whether the configuration which forces password resets to occur has been turned on
"""
if not settings.FEATURES['ADVANCED_SECURITY']:
return False
min_days_pw_reset = settings.ADVANCED_SECURITY_CONFIG.get(
'MIN_DAYS_FOR_STUDENT_ACCOUNTS_PASSWORD_RESETS'
)
return min_days_pw_reset
@classmethod
def should_user_reset_password_now(cls, user):
"""
Returns whether a password has 'expired' and should be reset. Note there are two different
expiry policies for staff and students
"""
assert user
if not settings.FEATURES['ADVANCED_SECURITY']:
return False
days_before_password_reset = None
if user.is_staff:
if cls.is_staff_forced_password_reset_enabled():
days_before_password_reset = \
settings.ADVANCED_SECURITY_CONFIG['MIN_DAYS_FOR_STAFF_ACCOUNTS_PASSWORD_RESETS']
elif cls.is_student_forced_password_reset_enabled():
days_before_password_reset = \
settings.ADVANCED_SECURITY_CONFIG['MIN_DAYS_FOR_STUDENT_ACCOUNTS_PASSWORD_RESETS']
if days_before_password_reset:
history = PasswordHistory.objects.filter(user=user).order_by('-time_set')
time_last_reset = None
if history:
# first element should be the last time we reset password
time_last_reset = history[0].time_set
else:
# no history, then let's take the date the user joined
time_last_reset = user.date_joined
now = timezone.now()
delta = now - time_last_reset
return delta.days >= days_before_password_reset
return False
@classmethod
def is_password_reset_too_soon(cls, user):
"""
Verifies that the password is not getting reset too frequently
"""
if not cls.is_password_reset_frequency_restricted():
return False
history = PasswordHistory.objects.filter(user=user).order_by('-time_set')
if not history:
return False
now = timezone.now()
delta = now - history[0].time_set
return delta.days < settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS']
@classmethod
def is_allowable_password_reuse(cls, user, new_password):
"""
Verifies that the password adheres to the reuse policies
"""
assert user
if not settings.FEATURES['ADVANCED_SECURITY']:
return True
if user.is_staff and cls.is_staff_password_reuse_restricted():
min_diff_passwords_required = \
settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE']
elif cls.is_student_password_reuse_restricted():
min_diff_passwords_required = \
settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE']
else:
min_diff_passwords_required = 0
# just limit the result set to the number of different
# password we need
history = PasswordHistory.objects.filter(user=user).order_by('-time_set')[:min_diff_passwords_required]
for entry in history:
# be sure to re-use the same salt
# NOTE, how the salt is serialized in the password field is dependent on the algorithm
# in pbkdf2_sha256 [LMS] it's the 3rd element, in sha1 [unit tests] it's the 2nd element
hash_elements = entry.password.split('$')
algorithm = hash_elements[0]
if algorithm == 'pbkdf2_sha256':
hashed_password = make_password(new_password, hash_elements[2])
elif algorithm == 'sha1':
hashed_password = make_password(new_password, hash_elements[1])
else:
# This means we got something unexpected. We don't want to throw an exception, but
# log as an error and basically allow any password reuse
AUDIT_LOG.error('''
Unknown password hashing algorithm "{0}" found in existing password
hash, password reuse policy will not be enforced!!!
'''.format(algorithm))
return True
if entry.password == hashed_password:
return False
return True
@classmethod
def retire_user(cls, user_id):
"""
Updates the password in all rows corresponding to a user
to an empty string as part of removing PII for user retirement.
"""
return cls.objects.filter(user_id=user_id).update(password="")
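# Minimal sketch of the hash parsing done in is_allowable_password_reuse above:
# Django password strings are '$'-separated and the salt position depends on
# the hasher ('algorithm$iterations$salt$hash' for pbkdf2_sha256,
# 'algorithm$salt$hash' for sha1). Illustrative helper only:
def _salt_from_password_string(password_string):
    """Return the salt embedded in a Django password string, or None."""
    hash_elements = password_string.split('$')
    if hash_elements[0] == 'pbkdf2_sha256':
        return hash_elements[2]
    if hash_elements[0] == 'sha1':
        return hash_elements[1]
    return None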
class LoginFailures(models.Model):
"""
This model will keep track of failed login attempts
"""
user = models.ForeignKey(User, on_delete=models.CASCADE)
failure_count = models.IntegerField(default=0)
lockout_until = models.DateTimeField(null=True)
@classmethod
def _get_record_for_user(cls, user):
"""
Gets a user's record, and fixes any duplicates that may have arisen due to get_or_create
race conditions. See https://code.djangoproject.com/ticket/13906 for details.
Use this method in place of `LoginFailures.objects.get(user=user)`
"""
records = LoginFailures.objects.filter(user=user).order_by('-lockout_until')
for extra_record in records[1:]:
extra_record.delete()
return records.get()
@classmethod
def is_feature_enabled(cls):
"""
Returns whether the feature flag around this functionality has been set
"""
return settings.FEATURES['ENABLE_MAX_FAILED_LOGIN_ATTEMPTS']
@classmethod
def is_user_locked_out(cls, user):
"""
        Static method to return whether a given user has his/her account locked out
"""
try:
record = cls._get_record_for_user(user)
if not record.lockout_until:
return False
now = datetime.now(UTC)
until = record.lockout_until
is_locked_out = until and now < until
return is_locked_out
except ObjectDoesNotExist:
return False
@classmethod
def increment_lockout_counter(cls, user):
"""
Ticks the failed attempt counter
"""
record, _ = LoginFailures.objects.get_or_create(user=user)
record.failure_count = record.failure_count + 1
max_failures_allowed = settings.MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED
# did we go over the limit in attempts
if record.failure_count >= max_failures_allowed:
            # yes, so store the time until which this account is locked out
lockout_period_secs = settings.MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS
record.lockout_until = datetime.now(UTC) + timedelta(seconds=lockout_period_secs)
record.save()
@classmethod
def clear_lockout_counter(cls, user):
"""
Removes the lockout counters (normally called after a successful login)
"""
try:
entry = cls._get_record_for_user(user)
entry.delete()
except ObjectDoesNotExist:
return
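# Typical login-flow usage of the helpers above (illustrative):
#   if LoginFailures.is_feature_enabled() and LoginFailures.is_user_locked_out(user):
#       ...reject the attempt...
#   LoginFailures.increment_lockout_counter(user)   # on bad credentials
#   LoginFailures.clear_lockout_counter(user)       # on successful login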
class CourseEnrollmentException(Exception):
pass
class NonExistentCourseError(CourseEnrollmentException):
pass
class EnrollmentClosedError(CourseEnrollmentException):
pass
class CourseFullError(CourseEnrollmentException):
pass
class AlreadyEnrolledError(CourseEnrollmentException):
pass
class CourseEnrollmentManager(models.Manager):
"""
Custom manager for CourseEnrollment with Table-level filter methods.
"""
def num_enrolled_in(self, course_id):
"""
Returns the count of active enrollments in a course.
'course_id' is the course_id to return enrollments
"""
enrollment_number = super(CourseEnrollmentManager, self).get_queryset().filter(
course_id=course_id,
is_active=1
).count()
return enrollment_number
def num_enrolled_in_exclude_admins(self, course_id):
"""
Returns the count of active enrollments in a course excluding instructors, staff and CCX coaches.
Arguments:
course_id (CourseLocator): course_id to return enrollments (count).
Returns:
int: Count of enrollments excluding staff, instructors and CCX coaches.
"""
# To avoid circular imports.
from student.roles import CourseCcxCoachRole, CourseInstructorRole, CourseStaffRole
course_locator = course_id
if getattr(course_id, 'ccx', None):
course_locator = course_id.to_course_locator()
staff = CourseStaffRole(course_locator).users_with_role()
admins = CourseInstructorRole(course_locator).users_with_role()
coaches = CourseCcxCoachRole(course_locator).users_with_role()
return super(CourseEnrollmentManager, self).get_queryset().filter(
course_id=course_id,
is_active=1,
).exclude(user__in=staff).exclude(user__in=admins).exclude(user__in=coaches).count()
def is_course_full(self, course):
"""
Returns a boolean value indicating whether a course has already reached its max enrollment
capacity
"""
is_course_full = False
if course.max_student_enrollments_allowed is not None:
is_course_full = self.num_enrolled_in_exclude_admins(course.id) >= course.max_student_enrollments_allowed
return is_course_full
def users_enrolled_in(self, course_id, include_inactive=False):
"""
Return a queryset of User for every user enrolled in the course. If
`include_inactive` is True, returns both active and inactive enrollees
for the course. Otherwise returns actively enrolled users only.
"""
filter_kwargs = {
'courseenrollment__course_id': course_id,
}
if not include_inactive:
filter_kwargs['courseenrollment__is_active'] = True
return User.objects.filter(**filter_kwargs)
def enrollment_counts(self, course_id):
"""
Returns a dictionary that stores the total enrollment count for a course, as well as the
enrollment count for each individual mode.
"""
# Unfortunately, Django's "group by"-style queries look super-awkward
query = use_read_replica_if_available(
super(CourseEnrollmentManager, self).get_queryset().filter(course_id=course_id, is_active=True).values(
'mode').order_by().annotate(Count('mode')))
total = 0
enroll_dict = defaultdict(int)
for item in query:
enroll_dict[item['mode']] = item['mode__count']
total += item['mode__count']
enroll_dict['total'] = total
return enroll_dict
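# Example of the dictionary shape returned by enrollment_counts() above, with
# hypothetical mode names and counts; the actual keys depend on the CourseMode
# values in use for the course:
#   {'audit': 1520, 'verified': 87, 'total': 1607}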
def enrolled_and_dropped_out_users(self, course_id):
"""Return a queryset of Users in the course."""
return User.objects.filter(
courseenrollment__course_id=course_id
)
# Named tuple for fields pertaining to the state of
# CourseEnrollment for a user in a course. This type
# is used to cache the state in the request cache.
CourseEnrollmentState = namedtuple('CourseEnrollmentState', 'mode, is_active')
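# For illustration, a cached entry might look like (values are hypothetical):
#   state = CourseEnrollmentState(mode='verified', is_active=True)
#   state.mode       -> 'verified'
#   state.is_active  -> True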
class CourseEnrollment(models.Model):
"""
Represents a Student's Enrollment record for a single Course. You should
generally not manipulate CourseEnrollment objects directly, but use the
classmethods provided to enroll, unenroll, or check on the enrollment status
of a given student.
We're starting to consolidate course enrollment logic in this class, but
more should be brought in (such as checking against CourseEnrollmentAllowed,
checking course dates, user permissions, etc.) This logic is currently
scattered across our views.
"""
MODEL_TAGS = ['course', 'is_active', 'mode']
user = models.ForeignKey(User, on_delete=models.CASCADE)
course = models.ForeignKey(
CourseOverview,
db_constraint=False,
on_delete=models.DO_NOTHING,
)
@property
def course_id(self):
return self._course_id
@course_id.setter
def course_id(self, value):
if isinstance(value, basestring):
self._course_id = CourseKey.from_string(value)
else:
self._course_id = value
created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)
# If is_active is False, then the student is not considered to be enrolled
# in the course (is_enrolled() will return False)
is_active = models.BooleanField(default=True)
# Represents the modes that are possible. We'll update this later with a
# list of possible values.
mode = models.CharField(default=CourseMode.DEFAULT_MODE_SLUG, max_length=100)
objects = CourseEnrollmentManager()
|
#
# Licensed under the GNU General Public License Version 3
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright 2013 Aron Parsons <aronparsons@gmail.com>
# Copyright (c) 2013--2018 Red Hat, Inc.
#
# NOTE: the 'self' variable is an instance of SpacewalkShell
# unused argument
# pylint: disable=W0613
# wildcard import
# pylint: disable=W0401,W0614
# invalid function name
# pylint: disable=C0103
import shlex
try:
from xmlrpc import client as xmlrpclib
except ImportError:
import xmlrpclib
from operator import itemgetter
from xml.parsers.expat import ExpatError
from spacecmd.utils import *
__PKG_COMPARISONS = {0: 'Same',
1: 'Only here',
2: 'Newer here',
3: 'Only there',
4: 'Newer there'}
def print_package_comparison(self, results):
max_name = max_length(map(itemgetter('package_name'), results), minimum=7)
# sometimes 'this_system' or 'other_system' can be None
tmp_this = []
tmp_other = []
for item in results:
tmp_this.append(str(item.get('this_system')))
tmp_other.append(str(item.get('other_system')))
max_this = max_length(tmp_this, minimum=11)
max_other = max_length(tmp_other, minimum=12)
max_comparison = 10
# print headers
print('%s %s %s %s' % (
'Package'.ljust(max_name),
'This System'.ljust(max_this),
'Other System'.ljust(max_other),
'Difference'.ljust(max_comparison)))
print('%s %s %s %s' % (
'-' * max_name,
'-' * max_this,
'-' * max_other,
'-' * max_comparison))
for item in results:
# don't show packages that are the same
if item.get('comparison') == 0:
continue
print('%s %s %s %s' % (
item.get('package_name').ljust(max_name),
str(item.get('this_system')).ljust(max_this),
str(item.get('other_system')).ljust(max_other),
__PKG_COMPARISONS[item.get('comparison')]))
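# Illustrative sketch: print_package_comparison() above expects `results` to be
# a list of dicts with 'package_name', 'this_system', 'other_system' and a
# 'comparison' code matching __PKG_COMPARISONS. The sample data below is
# fabricated; in practice the list comes from a package-comparison API call
# that is not shown here. `self` is unused by the helper, so None suffices.
def _example_print_package_comparison():
    sample_results = [
        {'package_name': 'bash', 'this_system': '4.2-9',
         'other_system': '4.2-9', 'comparison': 0},      # Same (skipped)
        {'package_name': 'kernel', 'this_system': '3.10.0-123',
         'other_system': None, 'comparison': 1},         # Only here
        {'package_name': 'openssl', 'this_system': '1.0.1e-34',
         'other_system': '1.0.1e-42', 'comparison': 4},  # Newer there
    ]
    print_package_comparison(None, sample_results)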
####################
def manipulate_child_channels(self, args, remove=False):
arg_parser = get_argument_parser()
(args, _options) = parse_command_arguments(args, arg_parser)
if len(args) < 2:
if remove:
self.help_system_removechildchannels()
else:
self.help_system_addchildchannels()
return
# use the systems listed in the SSM
if re.match('ssm', args[0], re.I):
systems = self.ssm.keys()
args.pop(0)
else:
systems = self.expand_systems(args.pop(0))
new_channels = args
print('Systems')
print('-------')
print('\n'.join(sorted(["%s" % x for x in systems])))
print('')
if remove:
print('Removing Channels')
print('-----------------')
else:
print('Adding Channels')
print('---------------')
print('\n'.join(sorted(new_channels)))
if not self.user_confirm():
return
for system in systems:
system_id = self.get_system_id(system)
if not system_id:
continue
child_channels = \
self.client.system.listSubscribedChildChannels(self.session,
system_id)
child_channels = [c.get('label') for c in child_channels]
if remove:
for channel in new_channels:
if channel in child_channels:
child_channels.remove(channel)
else:
for channel in new_channels:
if channel not in child_channels:
child_channels.append(channel)
self.client.system.setChildChannels(self.session,
system_id,
child_channels)
####################
def help_system_list(self):
print('system_list: List all system profiles')
print('usage: system_list')
def do_system_list(self, args, doreturn=False):
if doreturn:
return self.get_system_names()
else:
if self.get_system_names():
print('\n'.join(sorted(['%s : %s' % (v, k) for k, v in self.get_system_names_ids().items()])))
return None
####################
def help_system_reboot(self):
print('system_reboot: Reboot a system')
print('''usage: system_reboot <SYSTEMS> [options]
options:
-s START_TIME''')
print('')
print(self.HELP_SYSTEM_OPTS)
print('')
print(self.HELP_TIME_OPTS)
def complete_system_reboot(self, text, line, beg, end):
return self.tab_complete_systems(text)
def do_system_reboot(self, args):
arg_parser = get_argument_parser()
arg_parser.add_argument('-s', '--start-time')
(args, options) = parse_command_arguments(args, arg_parser)
if not args:
self.help_system_reboot()
return
# use the systems listed in the SSM
if re.match('ssm', args[0], re.I):
systems = self.ssm.keys()
else:
systems = self.expand_systems(args)
# get the start time option
# skip the prompt if we are running with --yes
# use "now" if no start time was given
if is_interactive(options) and not self.options.yes:
options.start_time = prompt_user('Start Time [now]:')
options.start_time = parse_time_input(options.start_time)
else:
if not options.start_time:
options.start_time = parse_time_input('now')
else:
options.start_time = parse_time_input(options.start_time)
print('')
print('Start Time: %s' % options.start_time)
print('')
print('Systems')
print('-------')
print('\n'.join(sorted(systems)))
if not self.user_confirm('Reboot these systems [y/N]:'):
return
for system in systems:
system_id = self.get_system_id(system)
if not system_id:
continue
self.client.system.scheduleReboot(self.session, system_id, options.start_time)
####################
def help_system_search(self):
print('system_search: List systems that match the given criteria')
print('usage: system_search QUERY')
print('')
print('Available Fields:')
print('\n'.join(self.SYSTEM_SEARCH_FIELDS))
print('')
print('Examples:')
print('> system_search device:vmware')
print('> system_search ip:192.168.82')
def do_system_search(self, args, doreturn=False):
arg_parser = get_argument_parser()
(args, _options) = parse_command_arguments(args, arg_parser)
if len(args) != 1:
self.help_system_search()
return []
query = args[0]
if re.search(':', query):
try:
(field, value) = query.split(':')
except ValueError:
logging.error('Invalid query')
return []
else:
field = 'name'
value = query
if not value:
logging.warning('Invalid query')
return []
results = []
if field == 'name':
results = self.client.system.search.nameAndDescription(self.session,
value)
key = 'name'
elif field == 'id':
# build an array of key/value pairs from our local system cache
self.generate_system_cache()
results = [{'id': k, 'name': self.all_systems[k]}
for k in self.all_systems]
key = 'id'
elif field == 'ip':
results = self.client.system.search.ip(self.session, value)
key = 'ip'
elif field == 'hostname':
results = self.client.system.search.hostname(self.session, value)
key = 'hostname'
elif field == 'device':
results = self.client.system.search.deviceDescription(self.session,
value)
key = 'hw_description'
elif field == 'vendor':
results = self.client.system.search.deviceVendorId(self.session,
value)
key = 'hw_vendor_id'
elif field == 'driver':
results = self.client.system.search.deviceDriver(self.session,
value)
key = 'hw_driver'
elif field == 'uuid':
results = self.client.system.search.uuid(self.session, value)
key = 'uuid'
else:
logging.warning('Invalid search field')
return []
systems = []
max_size = 0
for s in results:
# only use real matches, not the fuzzy ones we get back
if re.search(value, "%s" % s.get(key), re.I):
if len(s.get('name')) > max_size:
max_size = len(s.get('name'))
systems.append((s.get('name'), s.get(key), s.get('id')))
if doreturn:
return [s[2] for s in systems]
else:
if systems:
for s in sorted(systems):
if key == 'name':
print(s[0])
else:
print('%s %s' % (s[0].ljust(max_size),
str(s[1]).strip()))
return []
####################
def help_system_runscript(self):
print('system_runscript: Schedule a script to run on the list of')
print(' systems provided')
print('''usage: system_runscript <SYSTEMS> [options]
options:
-u USER
-g GROUP
-t TIMEOUT
-s START_TIME
-l LABEL
-f FILE''')
print('')
print(self.HELP_SYSTEM_OPTS)
print('')
print(self.HELP_TIME_OPTS)
def complete_system_runscript(self, text, line, beg, end):
return self.tab_complete_systems(text)
def do_system_runscript(self, args):
arg_parser = get_argument_parser()
arg_parser.add_argument('-u', '--user')
arg_parser.add_argument('-g', '--group')
arg_parser.add_argument('-t', '--timeout')
arg_parser.add_argument('-s', '--start-time')
arg_parser.add_argument('-l', '--label')
arg_parser.add_argument('-f', '--file')
(args, options) = parse_command_arguments(args, arg_parser)
if not args:
self.help_system_runscript()
return
# use the systems listed in the SSM
if re.match('ssm', args[0], re.I):
systems = self.ssm.keys()
else:
systems = self.expand_systems(args)
if not systems:
logging.warning('No systems selected')
return
if is_interactive(options):
options.user = prompt_user('User [root]:')
options.group = prompt_user('Group [root]:')
# defaults
if not options.user:
options.user = 'root'
if not options.group:
options.group = 'root'
try:
options.timeout = prompt_user('Timeout (in seconds) [600]:')
if options.timeout:
options.timeout = int(options.timeout)
else:
options.timeout = 600
except ValueError:
logging.error('Invalid timeout')
return
options.start_time = prompt_user('Start Time [now]:')
options.start_time = parse_time_input(options.start_time)
options.label = prompt_user('Label/Short Description [default]:')
if options.label == "":
options.label = None
options.file = prompt_user('Script File [create]:')
# read the script provided by the user
if options.file:
keep_script_file = True
script_contents = read_file(os.path.abspath(options.file))
else:
# have the user write their script
(script_contents, options.file) = editor('#!/bin/bash')
keep_script_file = False
if not script_contents:
logging.error('No script provided')
return
else:
if not options.user:
options.user = 'root'
if not options.group:
options.group = 'root'
if not options.label:
options.label = None
if not options.timeout:
options.timeout = 600
else:
options.timeout = int(options.timeout)
if not options.start_time:
options.start_time = parse_time_input('now')
else:
options.start_time = parse_time_input(options.start_time)
if not options.file:
logging.error('A script file is required')
return
script_contents = read_file(options.file)
keep_script_file = True
# display a summary
print('')
print('User: %s' % options.user)
print('Group: %s' % options.group)
print('Timeout: %i seconds' % options.timeout)
print('Start Time: %s' % options.start_time)
print('')
if options.label:
print('Label: %s' % options.label)
print('Script Contents')
print('---------------')
print(script_contents)
print('Systems')
print('-------')
print('\n'.join(sorted(systems)))
# have the user confirm
if not self.user_confirm():
return
scheduled = 0
if self.check_api_version('10.11'):
logging.debug('Scheduling all systems for the same action')
# schedule all systems for the same action
system_ids = [self.get_system_id(s) for s in systems]
if not options.label:
action_id = self.client.system.scheduleScriptRun(self.session,
system_ids,
options.user,
options.group,
options.timeout,
script_contents,
options.start_time)
else:
action_id = self.client.system.scheduleScriptRun(self.session,
options.label,
system_ids,
options.user,
options.group,
options.timeout,
script_contents,
options.start_time)
logging.info('Action ID: %i' % action_id)
scheduled = len(system_ids)
else:
# older versions of the API require each system to be
# scheduled individually
for system in systems:
system_id = self.get_system_id(system)
if not system_id:
continue
try:
action_id = \
self.client.system.scheduleScriptRun(self.session,
system_id,
options.user,
options.group,
options.timeout,
script_contents,
options.start_time)
logging.info('Action ID: %i' % action_id)
scheduled += 1
except xmlrpclib.Fault as detail:
logging.debug(detail)
logging.error('Failed to schedule %s' % system)
logging.info('Scheduled: %i system(s)' % scheduled)
# don't delete a pre-existing script that the user provided
if not keep_script_file:
try:
os.remove(options.file)
except OSError:
logging.error('Could not remove %s' % options.file)
####################
def help_system_listhardware(self):
print('system_listhardware: List the hardware details of a system')
print('usage: system_listhardware <SYSTEMS>')
print('')
print(self.HELP_SYSTEM_OPTS)
def complete_system_listhardware(self, text, line, beg, end):
return self.tab_complete_systems(text)
def do_system_listhardware(self, args):
arg_parser = get_argument_parser()
(args, _options) = parse_command_arguments(args, arg_parser)
if not args:
self.help_system_listhardware()
return
add_separator = False
# use the systems listed in the SSM
if re.match('ssm', args[0], re.I):
systems = self.ssm.keys()
else:
systems = self.expand_systems(args)
for system in sorted(systems):
system_id = self.get_system_id(system)
if not system_id:
continue
cpu = self.client.system.getCpu(self.session, system_id)
memory = self.client.system.getMemory(self.session, system_id)
devices = self.client.system.getDevices(self.session, system_id)
network = self.client.system.getNetworkDevices(self.session,
system_id)
# Solaris systems don't have these values
for v in ('cache', 'vendor', 'family', 'stepping'):
if not cpu.get(v):
cpu[v] = ''
try:
dmi = self.client.system.getDmi(self.session, system_id)
except ExpatError:
dmi = None
if add_separator:
print(self.SEPARATOR)
add_separator = True
if len(systems) > 1:
print('System: %s' % system)
print('')
if network:
print('Network')
print('-------')
count = 0
for device in network:
if count:
print('')
count += 1
print('Interface: %s' % device.get('interface'))
print('MAC Address: %s' % device.get('hardware_address').upper())
print('IP Address: %s' % device.get('ip'))
print('Netmask: %s' % device.get('netmask'))
print('Broadcast: %s' % device.get('broadcast'))
print('Module: %s' % device.get('module'))
print('')
print('CPU')
print('---')
print('Count: %i' % cpu.get('count'))
print('Arch: %s' % cpu.get('arch'))
print('MHz: %s' % cpu.get('mhz'))
print('Cache: %s' % cpu.get('cache'))
print('Vendor: %s' % cpu.get('vendor'))
print('Model: %s' % re.sub(r'\s+', ' ', cpu.get('model')))
print('')
print('Memory')
print('------')
print('RAM: %i' % memory.get('ram'))
print('Swap: %i' % memory.get('swap'))
if dmi:
print('')
print('DMI')
print('Vendor: %s' % dmi.get('vendor'))
print('System: %s' % dmi.get('system'))
print('Product: %s' % dmi.get('product'))
print('Board: %s' % dmi.get('board'))
print('')
print('Asset')
print('-----')
for asset in dmi.get('asset').split(') ('):
print(re.sub(r'\)|\(', '', asset))
print('')
print('BIOS Release: %s' % dmi.get('bios_release'))
print('BIOS Vendor: %s' % dmi.get('bios_vendor'))
print('BIOS Version: %s' % dmi.get('bios_version'))
if devices:
print('')
print('Devices')
print('-------')
count = 0
for device in devices:
if count:
print('')
count += 1
if device.get('description') is None:
print('Description: None')
else:
print('Description: %s' % (
wrap(device.get('description'), 60)[0]))
print('Driver: %s' % device.get('driver'))
print('Class: %s' % device.get('device_class'))
print('Bus: %s' % device.get('bus'))
####################
def help_system_installpackage(self):
print('system_installpackage: Install a package on a system')
print('''usage: system_installpackage <SYSTEMS> <PACKAGE ...> [options]
options:
-s START_TIME''')
print('')
print(self.HELP_SYSTEM_OPTS)
print('')
print(self.HELP_TIME_OPTS)
def complete_system_installpackage(self, text, line, beg, end):
parts = line.split(' ')
if len(parts) == 2:
return self.tab_complete_systems(text)
elif len(parts) > 2:
return tab_completer(self.get_package_names(), text)
return None
def do_system_installpackage(self, args):
arg_parser = get_argument_parser()
arg_parser.add_argument('-s', '--start-time')
(args, options) = parse_command_arguments(args, arg_parser)
if len(args) < 2:
self.help_system_installpackage()
return
# get the start time option
# skip the prompt if we are running with --yes
# use "now" if no start time was given
if is_interactive(options) and not self.options.yes:
options.start_time = prompt_user('Start Time [now]:')
options.start_time = parse_time_input(options.start_time)
else:
if not options.start_time:
options.start_time = parse_time_input('now')
else:
options.start_time = parse_time_input(options.start_time)
# use the systems listed in the SSM
if re.match('ssm', args[0], re.I):
systems = self.ssm.keys()
# remove 'ssm' from the argument list
args.pop(0)
else:
systems = self.expand_systems(args.pop(0))
packages_to_install = args
# get the ID for each system
system_ids = []
for system in sorted(systems):
system_id = self.get_system_id(system)
if not system_id:
continue
system_ids.append(system_id)
jobs = {}
if self.check_api_version('10.11'):
for package in packages_to_install:
logging.debug('Finding the latest version of %s' % package)
avail_packages = \
self.client.system.listLatestAvailablePackage(self.session,
system_ids,
package)
for system in avail_packages:
system_id = system.get('id')
if system_id not in jobs:
jobs[system_id] = []
# add this package to the system's queue
jobs[system_id].append(system.get('package').get('id'))
else:
# XXX: Satellite 5.3 compatibility
for system_id in system_ids:
logging.debug('Getting available packages for %s' %
self.get_system_name(system_id))
avail_packages = \
self.client.system.listLatestInstallablePackages(self.session,
system_id)
for package in avail_packages:
if package.get('name') in packages_to_install:
if system_id not in jobs:
jobs[system_id] = []
jobs[system_id].append(package.get('id'))
if not jobs:
logging.warning('No packages to install')
return
add_separator = False
warnings = []
for system_id in jobs:
if add_separator:
print(self.SEPARATOR)
add_separator = True
# warn the user if the request can not be 100% fulfilled
if len(jobs[system_id]) != len(packages_to_install):
# stash the warnings and show at the end so the user can see them
warnings.append(system_id)
print('%s:' % self.get_system_name(system_id))
for package_id in jobs[system_id]:
print(self.get_package_name(package_id))
# show the warnings to the user
if warnings:
print('')
for system_id in warnings:
logging.warning('%s does not have access to all requested packages' %
self.get_system_name(system_id))
print('')
print('Start Time: %s' % options.start_time)
if not self.user_confirm('Install these packages [y/N]:'):
return
scheduled = 0
for system_id in jobs:
try:
self.client.system.schedulePackageInstall(self.session,
system_id,
jobs[system_id],
options.start_time)
scheduled += 1
except xmlrpclib.Fault:
logging.error('Failed to schedule %s' % self.get_system_name(system_id))
logging.info('Scheduled %i system(s)' % scheduled)
####################
def help_system_removepackage(self):
print('system_removepackage: Remove a package from a system')
print('''usage: system_removepackage <SYSTEMS> <PACKAGE ...> [options]
options:
-s START_TIME''')
print('')
print(self.HELP_SYSTEM_OPTS)
print('')
print(self.HELP_TIME_OPTS)
def complete_system_removepackage(self, text, line, beg, end):
parts = line.split(' ')
if len(parts) == 2:
return self.tab_complete_systems(text)
elif len(parts) > 2:
return tab_completer(self.get_package_names(), text)
return None
def do_system_removepackage(self, args):
arg_parser = get_argument_parser()
arg_parser.add_argument('-s', '--start-time')
(args, options) = parse_command_arguments(args, arg_parser)
if len(args) < 2:
self.help_system_removepackage()
return
# get the start time option
# skip the prompt if we are running with --yes
# use "now" if no start time was given
if is_interactive(options) and not self.options.yes:
options.start_time = prompt_user('Start Time [now]:')
options.start_time = parse_time_input(options.start_time)
else:
if not options.start_time:
options.start_time = parse_time_input('now')
else:
options.start_time = parse_time_input(options.start_time)
# use the systems listed in the SSM
if re.match('ssm', args[0], re.I):
systems = self.ssm.keys()
# remove 'ssm' from the argument list
args.pop(0)
else:
systems = self.expand_systems(args.pop(0))
package_list = args
# get all matching package names
logging.debug('Finding matching packages')
matching_packages = \
filter_results(self.get_package_names(True), package_list, True)
jobs = {}
for package_name in matching_packages:
logging.debug('Finding systems with %s' % package_name)
installed_systems = {}
for package_id in self.get_package_id(package_name):
for system in self.client.system.listSystemsWithPackage(self.session, package_id):
installed_systems[system.get('name')] = package_id
# each system has a list of packages to remove so that only one
# API call needs to be made to schedule all the package removals
# for each system
for system in systems:
if system in installed_systems.keys():
if system not in jobs:
jobs[system] = []
jobs[system].append(installed_systems[system])
add_separator = False
for system in jobs:
if add_separator:
print(self.SEPARATOR)
add_separator = True
print('%s:' % system)
for package in jobs[system]:
print(self.get_package_name(package))
if not jobs:
return
print('')
print('Start Time: %s' % options.start_time)
if not self.user_confirm('Remove these packages [y/N]:'):
return
scheduled = 0
for system in jobs:
system_id = self.get_system_id(system)
if not system_id:
continue
try:
action_id = self.client.system.schedulePackageRemove(self.session,
system_id,
jobs[system],
options.start_time)
logging.info('Action ID: %i' % action_id)
scheduled += 1
except xmlrpclib.Fault:
logging.error('Failed to schedule %s' % system)
logging.info('Scheduled %i system(s)' % scheduled)
####################
def help_system_upgradepackage(self):
print('system_upgradepackage: Upgrade a package on a system')
print('''usage: system_upgradepackage <SYSTEMS> <PACKAGE ...>|* [options]
options:
-s START_TIME''')
print('')
print(self.HELP_SYSTEM_OPTS)
print('')
print(self.HELP_TIME_OPTS)
def complete_system_upgradepackage(self, text, line, beg, end):
parts = line.split(' ')
if len(parts) == 2:
return self.tab_complete_systems(text)
elif len(parts) > 2:
return tab_completer(self.get_package_names(), text)
return None
def do_system_upgradepackage(self, args):
arg_parser = get_argument_parser()
arg_parser.add_argument('-s', '--start-time')
# this will come handy for individual packages, as we call
# self.do_system_installpackage anyway
orig_args = args
(args, options) = parse_command_arguments(args, arg_parser)
if len(args) < 2:
self.help_system_upgradepackage()
return None
# install and upgrade for individual packages are the same
if '.*' not in args[1:]:
return self.do_system_installpackage(orig_args)
# get the start time option
# skip the prompt if we are running with --yes
# use "now" if no start time was given
if is_interactive(options) and not self.options.yes:
options.start_time = prompt_user('Start Time [now]:')
options.start_time = parse_time_input(options.start_time)
else:
if not options.start_time:
options.start_time = parse_time_input('now')
else:
options.start_time = parse_time_input(options.start_time)
# use the systems listed in the SSM
if re.match('ssm', args[0], re.I):
systems = self.ssm.keys()
# remove 'ssm' from the argument list
args.pop(0)
else:
systems = self.expand_systems(args.pop(0))
# make a dictionary of each system and the package IDs to install
jobs = {}
for system in sorted(systems):
system_id = self.get_system_id(system)
if not system_id:
continue
packages = \
self.client.system.listLatestUpgradablePackages(self.session,
system_id)
if packages:
package_ids = [p.get('to_package_id') for p in packages]
jobs[system] = package_ids
else:
logging.warning('No upgrades available for %s' % system)
if not jobs:
return None
add_separator = False
for system in jobs:
if add_separator:
print(self.SEPARATOR)
add_separator = True
print(system)
print('-' * len(system))
# build a temporary list so we can sort by package name
package_names = []
for package in jobs[system]:
name = self.get_package_name(package)
if name:
package_names.append(name)
else:
logging.error("Couldn't get name for package %i" % package)
print('\n'.join(sorted(package_names)))
print('')
print('Start Time: %s' % options.start_time)
if not self.user_confirm('Upgrade these packages [y/N]:'):
return None
scheduled = 0
for system in jobs:
system_id = self.get_system_id(system)
try:
self.client.system.schedulePackageInstall(self.session,
system_id,
jobs[system],
options.start_time)
scheduled += 1
except xmlrpclib.Fault:
logging.error('Failed to schedule %s' % system)
logging.info('Scheduled %i system(s)' % scheduled)
return None
####################
def help_system_listupgrades(self):
print('system_listupgrades: List the available upgrades for a system')
print('usage: system_listupgrades <SYSTEMS>')
print('')
print(self.HELP_SYSTEM_OPTS)
def complete_system_listupgrades(self, text, line, beg, end):
return self.tab_complete_systems(text)
def do_system_listupgrades(self, args):
arg_parser = get_argument_parser()
(args, _options) = parse_command_arguments(args, arg_parser)
if not args:
self.help_system_listupgrades()
return
add_separator = False
# use the systems listed in the SSM
if re.match('ssm', args[0], re.I):
systems = self.ssm.keys()
else:
systems = self.expand_systems(args)
for system in sorted(systems):
system_id = self.get_system_id(system)
if not system_id:
continue
packages = \
self.client.system.listLatestUpgradablePackages(self.session,
system_id)
if not packages:
logging.warning('No upgrades available for %s' % system)
continue
if add_separator:
print(self.SEPARATOR)
add_separator = True
if len(systems) > 1:
print(system)
print('-' * len(system))
latest_packages = filter_latest_packages(packages, 'to_version', 'to_release', 'to_epoch')
for package in sorted(latest_packages.values(), key=itemgetter('name')):
print(build_package_names({
'name': package['name'],
'version': package['to_version'],
'release': package['to_release'],
'epoch': package['to_epoch'],
'arch': package['to_arch']
}))
####################
def help_system_listinstalledpackages(self):
print('system_listinstalledpackages: List the installed packages on a')
print(' system')
print('usage: system_listinstalledpackages <SYSTEMS>')
print('')
print(self.HELP_SYSTEM_OPTS)
def complete_system_listinstalledpackages(self, text, line, beg, end):
return self.tab_complete_systems(text)
def do_system_listinstalledpackages(self, args):
arg_parser = get_argument_parser()
(args, _options) = parse_command_arguments(args, arg_parser)
if not args:
self.help_system_listinstalledpackages()
return
add_separator = False
# use the systems listed in the SSM
if re.match('ssm', args[0], re.I):
systems = self.ssm.keys()
else:
systems = self.expand_systems(args)
for system in sorted(systems):
system_id = self.get_system_id(system)
if not system_id:
continue
packages = self.client.system.listPackages(self.session,
system_id)
if add_separator:
print(self.SEPARATOR)
add_separator = True
if len(systems) > 1:
print('System: %s' % system)
print('')
print('\n'.join(build_package_names(packages)))
####################
def help_system_listconfigchannels(self):
print('system_listconfigchannels: List the config channels of a system')
print('usage: system_listconfigchannels <SYSTEMS>')
print('')
print(self.HELP_SYSTEM_OPTS)
def complete_system_listconfigchannels(self, text, line, beg, end):
return self.tab_complete_systems(text)
def do_system_listconfigchannels(self, args):
arg_parser = get_argument_parser()
(args, _options) = parse_command_arguments(args, arg_parser)
if not args:
self.help_system_listconfigchannels()
return
add_separator = False
# use the systems listed in the SSM
if re.match('ssm', args[0], re.I):
systems = self.ssm.keys()
else:
systems = self.expand_systems(args)
for system in sorted(systems):
system_id = self.get_system_id(system)
if not system_id:
continue
if add_separator:
print(self.SEPARATOR)
add_separator = True
if len(systems) > 1:
print('System: %s' % system)
try:
channels = self.client.system.config.listChannels(self.session,
system_id)
except xmlrpclib.Fault:
logging.warning('%s does not support configuration channels' %
system)
continue
print('\n'.join([c.get('label') for c in channels]))
####################
def print_configfiles(self, quiet, filelist):
# Figure out correct indentation to allow pretty table output
max_path = max_length([f['path'] for f in filelist], minimum=10)
max_type = max_length(["file", "directory", "symlink"], minimum=10)
max_label = max_length([f['channel_label'] for f in filelist], minimum=15)
# print header when not in quiet mode
if not quiet:
print('%s %s %s' % (
'path'.ljust(max_path),
'type'.ljust(max_type),
'label/type'.ljust(max_label)))
print('%s %s %s' % (
'-' * max_path,
'-' * max_type,
'-' * max_label))
for f in filelist:
print('%s %s %s' % (f['path'].ljust(max_path),
f['type'].ljust(max_type),
f['channel_label'].ljust(max_label)))
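# Illustrative sketch: print_configfiles() above expects `filelist` entries to
# be dicts carrying at least 'path', 'type' and 'channel_label'. The entries
# below are fabricated sample data, not output from a real server; `self` is
# unused by the helper, so None suffices for the demonstration.
def _example_print_configfiles():
    sample_files = [
        {'path': '/etc/motd', 'type': 'file',
         'channel_label': 'base-config'},
        {'path': '/etc/httpd/conf.d', 'type': 'directory',
         'channel_label': 'locally_managed'},
    ]
    # quiet=False also prints the header row
    print_configfiles(None, False, sample_files)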
def help_system_listconfigfiles(self):
print('system_listconfigfiles: List the managed config files of a system')
print('''usage: system_listconfigfiles <SYSTEMS>
options:
-s/--sandbox : list only system-sandbox files
-l/--local : list only locally managed files
-c/--central : list only centrally managed files
-q/--quiet : quiet mode (omits the header)''')
print('')
print(self.HELP_SYSTEM_OPTS)
def complete_system_listconfigfiles(self, text, line, beg, end):
return self.tab_complete_systems(text)
def do_system_listconfigfiles(self, args):
arg_parser = get_argument_parser()
arg_parser.add_argument('-s', '--sandbox', action='store_true')
arg_parser.add_argument('-l', '--local', action='store_true')
arg_parser.add_argument('-c', '--central', action='store_true')
arg_parser.add_argument('-q', '--quiet', action='store_true')
(args, options) = parse_command_arguments(args, arg_parser)
if not options.sandbox and not options.local and not options.central:
logging.debug("No sandbox/local/central option specified, listing ALL")
options.sandbox = True
options.local = True
options.central = True
if not args:
self.help_system_listconfigfiles()
return
add_separator = False
# use the systems listed in the SSM
if re.match('ssm', args[0], re.I):
systems = self.ssm.keys()
else:
systems = self.expand_systems(args)
for system in sorted(systems):
system_id = self.get_system_id(system)
if not system_id:
continue
if add_separator:
print(self.SEPARATOR)
add_separator = True
if len(systems) > 1:
print('System: %s' % system)
try:
# Pass 0 for system-sandbox files
# Pass 1 for locally managed or centrally managed
files = self.client.system.config.listFiles(self.session,
system_id, 0)
files += self.client.system.config.listFiles(self.session,
system_id, 1)
except xmlrpclib.Fault:
logging.warning('%s does not support configuration channels' %
system)
continue
# For system sandbox or locally managed files, there is no
# channel_label so we add a descriptive label for these files
toprint = []
for f in files:
if f['channel_type']['label'] == 'server_import':
f['channel_label'] = "system_sandbox"
if options.sandbox:
toprint.append(f)
elif f['channel_type']['label'] == 'local_override':
f['channel_label'] = "locally_managed"
if options.local:
toprint.append(f)
elif f['channel_type']['label'] == 'normal':
if options.central:
toprint.append(f)
else:
logging.error("Error, unexpected channel type label %s" %
f['channel_type']['label'])
return
self.print_configfiles(options.quiet, toprint)
####################
def help_system_addconfigfile(self):
print('system_addconfigfile: Create a configuration file')
print('Note this is only for system sandbox or locally-managed files')
print('Centrally managed files should be created via configchannel_addfile')
print('''usage: system_addconfigfile [SYSTEM] [options]
options:
-S/--sandbox : list only system-sandbox files
-L/--local : list only locally managed files
-p PATH
-r REVISION
-o OWNER [default: root]
-g GROUP [default: root]
-m MODE [default: 0644]
-x SELINUX_CONTEXT
-d path is a directory
-s path is a symlink
-b path is a binary (or other file which needs base64 encoding)
-t SYMLINK_TARGET
-f local path to file contents
Note re binary/base64: Some text files, notably those containing trailing
newlines or ASCII escape characters (or other characters not allowed in XML),
need to be sent as binary (-b). Some effort is made to auto-detect files
which require this, but you may need to specify this explicitly.
''')
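# Illustrative sketch related to the -b/--binary note above: one way to decide
# whether file contents are XML-safe and, if not, base64-encode them with the
# standard library. This is a standalone example and is not the exact
# auto-detection logic used by configfile_getinfo().
def _example_encode_config_contents(path):
    import base64
    raw = open(path, 'rb').read()
    try:
        text = raw.decode('utf-8')
        # rough heuristic: control characters (other than tab/newline/CR) or
        # trailing blank lines are what the note above flags as problematic
        needs_binary = any(ch < ' ' and ch not in '\t\n\r' for ch in text) \
            or text.endswith('\n\n')
    except UnicodeDecodeError:
        needs_binary = True
    if needs_binary:
        return base64.b64encode(raw), True   # base64 contents, send as binary
    return text, False                       # plain text contents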
def complete_system_addconfigfile(self, text, line, beg, end):
return self.tab_complete_systems(text)
def do_system_addconfigfile(self, args, update_path=''):
arg_parser = get_argument_parser()
arg_parser.add_argument('-S', '--sandbox', action='store_true')
arg_parser.add_argument('-L', '--local', action='store_true')
arg_parser.add_argument('-p', '--path')
arg_parser.add_argument('-o', '--owner')
arg_parser.add_argument('-g', '--group')
arg_parser.add_argument('-m', '--mode')
arg_parser.add_argument('-x', '--selinux-ctx')
arg_parser.add_argument('-t', '--target-path')
arg_parser.add_argument('-f', '--file')
arg_parser.add_argument('-r', '--revision')
arg_parser.add_argument('-s', '--symlink', action='store_true')
arg_parser.add_argument('-b', '--binary', action='store_true')
arg_parser.add_argument('-d', '--directory', action='store_true')
(args, options) = parse_command_arguments(args, arg_parser)
file_info = None
# the system name can be passed in
if args:
options.system = args[0]
interactive = is_interactive(options)
if interactive:
if not options.system:
while True:
print('Systems')
print('----------------------')
print('\n'.join(sorted(self.do_system_list('', True))))
print('')
options.system = prompt_user('Select:', noblank=True)
# ensure the user enters a valid system
if options.system in self.do_system_list('', True):
break
else:
print('')
logging.warning('%s is not a valid system' %
options.system)
print('')
if update_path:
options.path = update_path
else:
options.path = prompt_user('Path:', noblank=True)
while not options.local and not options.sandbox:
answer = prompt_user('System-Sandbox or Locally-Managed? [S/L]:')
if re.match('L', answer, re.I):
options.local = True
localopt = 1
elif re.match('S', answer, re.I):
options.sandbox = True
localopt = 0
# Set the int variable (required by the API calls) for sandbox/local
localopt = 0
if options.local:
logging.debug("Selected locally-managed")
localopt = 1
elif options.sandbox:
logging.debug("Selected system-sandbox")
else:
logging.error("Must choose system-sandbox or locally-managed option")
self.help_system_addconfigfile()
return
if not options.system:
logging.error("Must provide system")
self.help_system_addconfigfile()
return
system_id = self.get_system_id(options.system)
logging.debug("Got ID %s for system %s" % (system_id, options.system))
# check if this file already exists
try:
file_info = self.client.system.config.lookupFileInfo(self.session,
system_id, [options.path], localopt)
if file_info:
logging.debug("Found existing file_info %s" % file_info)
except xmlrpclib.Fault:
logging.debug("No existing file information found for %s" %
options.path)
file_info = self.configfile_getinfo(args, options, file_info, interactive)
if self.user_confirm():
if options.symlink:
self.client.system.config.createOrUpdateSymlink(self.session,
system_id, options.path, file_info, localopt)
else:
self.client.system.config.createOrUpdatePath(self.session,
system_id, options.path, options.directory, file_info,
localopt)
####################
def help_system_addconfigchannels(self):
print('system_addconfigchannels: Add config channels to a system')
print('''usage: system_addconfigchannels <SYSTEMS> <CHANNEL ...> [options]
options:
-t add channels to the top of the list
-b add channels to the bottom of the list''')
print('')
print(self.HELP_SYSTEM_OPTS)
def complete_system_addconfigchannels(self, text, line, beg, end):
parts = line.split(' ')
if len(parts) == 2:
return self.tab_complete_systems(text)
elif len(parts) > 2:
return tab_completer(self.do_configchannel_list('', True),
text)
return None
def do_system_addconfigchannels(self, args):
arg_parser = get_argument_parser()
arg_parser.add_argument('-t', '--top', action='store_true')
arg_parser.add_argument('-b', '--bottom', action='store_true')
(args, options) = parse_command_arguments(args, arg_parser)
if len(args) < 2:
self.help_system_addconfigchannels()
return
# use the systems listed in the SSM
if re.match('ssm', args[0], re.I):
systems = self.ssm.keys()
args.pop(0)
else:
systems = self.expand_systems(args.pop(0))
channels = args
if is_interactive(options):
answer = prompt_user('Add to top or bottom? [T/b]:')
if re.match('b', answer, re.I):
options.top = False
else:
options.top = True
else:
if options.bottom:
options.top = False
else:
options.top = True
system_ids = [self.get_system_id(s) for s in systems]
self.client.system.config.addChannels(self.session,
system_ids,
channels,
options.top)
####################
def help_system_removeconfigchannels(self):
print('system_removeconfigchannels: Remove config channels from a system')
print('usage: system_removeconfigchannels <SYSTEMS> <CHANNEL ...>')
print('')
print(self.HELP_SYSTEM_OPTS)
def complete_system_removeconfigchannels(self, text, line, beg, end):
parts = line.split(' ')
if len(parts) == 2:
return self.tab_complete_systems(text)
elif len(parts) > 2:
return tab_completer(self.do_configchannel_list('', True),
text)
return None
def do_system_removeconfigchannels(self, args):
arg_parser = get_argument_parser()
(args, _options) = parse_command_arguments(args, arg_parser)
if len(args) < 2:
self.help_system_removeconfigchannels()
return
# use the systems listed in the SSM
if re.match('ssm', args[0], re.I):
systems = self.ssm.keys()
args.pop(0)
else:
systems = self.expand_systems(args.pop(0))
channels = args
system_ids = [self.get_system_id(s) for s in systems]
self.client.system.config.removeChannels(self.session,
system_ids,
channels)
####################
def help_system_setconfigchannelorder(self):
print('system_setconfigchannelorder: Set the ranked order of configuration channels')
print('usage: system_setconfigchannelorder <SYSTEMS>')
print('')
print(self.HELP_SYSTEM_OPTS)
def complete_system_setconfigchannelorder(self, text, line, beg, end):
return self.tab_complete_systems(text)
def do_system_setconfigchannelorder(self, args):
arg_parser = get_argument_parser()
(args, _options) = parse_command_arguments(args, arg_parser)
if not args:
self.help_system_setconfigchannelorder()
return
# use the systems listed in the SSM
if re.match('ssm', args[0], re.I):
systems = self.ssm.keys()
else:
systems = self.expand_systems(args.pop(0))
# get the current configuration channels from the first system
# in the list
system_id = self.get_system_id(systems[0])
new_channels = self.client.system.config.listChannels(self.session,
system_id)
new_channels = [c.get('label') for c in new_channels]
# call an interface for the user to make selections
all_channels = self.do_configchannel_list('', True)
new_channels = config_channel_order(all_channels, new_channels)
print('')
print('New Configuration Channels')
print('--------------------------')
for i, new_channel in enumerate(new_channels, 1):
print('[%i] %s' % (i, new_channel))
if not self.user_confirm():
return
system_ids = [self.get_system_id(s) for s in systems]
self.client.system.config.setChannels(self.session,
system_ids,
new_channels)
####################
def help_system_deployconfigfiles(self):
print('system_deployconfigfiles: Deploy all configuration files for a system')
print('''usage: system_deployconfigfiles <SYSTEMS> [options]
options:
-s START_TIME''')
print('')
print(self.HELP_SYSTEM_OPTS)
print('')
print(self.HELP_TIME_OPTS)
def complete_system_deployconfigfiles(self, text, line, beg, end):
return self.tab_complete_systems(text)
def do_system_deployconfigfiles(self, args):
arg_parser = get_argument_parser()
arg_parser.add_argument('-s', '--start-time')
(args, options) = parse_command_arguments(args, arg_parser)
if not args:
self.help_system_deployconfigfiles()
return
# get the start time option
# skip the prompt if we are running with --yes
# use "now" if no start time was given
if is_interactive(options) and not self.options.yes:
options.start_time = prompt_user('Start Time [now]:')
options.start_time = parse_time_input(options.start_time)
else:
if not options.start_time:
options.start_time = parse_time_input('now')
else:
options.start_time = parse_time_input(options.start_time)
# use the systems listed in the SSM
if re.match('ssm', args[0], re.I):
systems = self.ssm.keys()
else:
systems = self.expand_systems(args)
if not systems:
return
print('')
print('Start Time: %s' % options.start_time)
print('')
print('Systems')
print('-------')
print('\n'.join(sorted(systems)))
message = 'Deploy ALL configuration files to these systems [y/N]:'
if not self.user_confirm(message):
return
system_ids = [self.get_system_id(s) for s in systems]
self.client.system.config.deployAll(self.session,
system_ids,
options.start_time)
logging.info('Scheduled deployment for %i system(s)' % len(system_ids))
####################
def help_system_delete(self):
print('system_delete: Delete a system profile')
print('usage: system_delete <SYSTEMS>')
print('')
print(self.HELP_SYSTEM_OPTS)
def complete_system_delete(self, text, line, beg, end):
return self.tab_complete_systems(text)
def do_system_delete(self, args):
arg_parser = get_argument_parser()
(args, _options) = parse_command_arguments(args, arg_parser)
if not args:
self.help_system_delete()
return
system_ids = []
# use the systems listed in the SSM
if re.match('ssm', args[0], re.I):
systems = self.ssm.keys()
else:
systems = self.expand_systems(args)
# get the system ID for each system
for system in systems:
system_id = self.get_system_id(system)
if not system_id:
continue
system_ids.append(system_id)
if not system_ids:
logging.warning('No systems to delete')
return
# make the column the right size
colsize = max_length([self.get_system_name(s) for s in system_ids])
if colsize < 7:
colsize = 7
print('%s System ID' % 'Profile'.ljust(colsize))
print('%s ---------' % ('-' * colsize))
# print a summary for the user
for system_id in system_ids:
print('%s %i' %
(self.get_system_name(system_id).ljust(colsize), system_id))
if not self.user_confirm('Delete these systems [y/N]:'):
return
self.client.system.deleteSystems(self.session, system_ids)
logging.info('Deleted %i system(s)', len(system_ids))
# regenerate the system name cache
self.generate_system_cache(True, delay=1)
# remove these systems from the SSM
for s in systems:
if s in self.ssm:
self.ssm.remove(s)
####################
def help_system_lock(self):
print('system_lock: Lock a system')
print('usage: system_lock <SYSTEMS>')
print('')
print(self.HELP_SYSTEM_OPTS)
def complete_system_lock(self, text, line, beg, end):
return self.tab_complete_systems(text)
def do_system_lock(self, args):
arg_parser = get_argument_parser()
(args, _options) = parse_command_arguments(args, arg_parser)
if not args:
self.help_system_lock()
return
# use the systems listed in the SSM
if re.match('ssm', args[0], re.I):
systems = self.ssm.keys()
else:
systems = self.expand_systems(args)
for system in sorted(systems):
system_id = self.get_system_id(system)
if not system_id:
continue
self.client.system.setLockStatus(self.session, system_id, True)
####################
def help_system_unlock(self):
print('system_unlock: Unlock a system')
print('usage: system_unlock <SYSTEMS>')
print('')
print(self.HELP_SYSTEM_OPTS)
def complete_system_unlock(self, text, line, beg, end):
return self.tab_complete_systems(text)
def do_system_unlock(self, args):
arg_parser = get_argument_parser()
(args, _options) = parse_command_arguments(args, arg_parser)
if not args:
self.help_system_unlock()
return
# use the systems listed in the SSM
if re.match('ssm', args[0], re.I):
systems = self.ssm.keys()
else:
systems = self.expand_systems(args)
for system in sorted(systems):
system_id = self.get_system_id(system)
if not system_id:
continue
self.client.system.setLockStatus(self.session, system_id, False)
####################
def help_system_rename(self):
print('system_rename: Rename a system profile')
print('usage: system_rename OLDNAME NEWNAME')
def complete_system_rename(self, text, line, beg, end):
if len(line.split(' ')) == 2:
return tab_completer(self.get_system_names(), text)
return None
def do_system_rename(self, args):
arg_parser = get_argument_parser()
(args, _options) = parse_command_arguments(args, arg_parser)
if len(args) != 2:
self.help_system_rename()
return
(old_name, new_name) = args
system_id = self.get_system_id(old_name)
if not system_id:
return
print('%s (%s) -> %s' % (old_name, system_id, new_name))
if not self.user_confirm():
return
self.client.system.setProfileName(self.session,
system_id,
new_name)
# regenerate the cache of systems
self.generate_system_cache(True)
# update the SSM
if old_name in self.ssm:
self.ssm.remove(old_name)
self.ssm.append(new_name)
####################
def help_system_listcustomvalues(self):
print('system_listcustomvalues: List the custom values for a system')
print('usage: system_listcustomvalues <SYSTEMS>')
print('')
print(self.HELP_SYSTEM_OPTS)
def complete_system_listcustomvalues(self, text, line, beg, end):
return self.tab_complete_systems(text)
def do_system_listcustomvalues(self, args):
arg_parser = get_argument_parser()
(args, _options) = parse_command_arguments(args, arg_parser)
if not args:
self.help_system_listcustomvalues()
return
# use the systems listed in the SSM
if re.match('ssm', args[0], re.I):
systems = self.ssm.keys()
else:
systems = self.expand_systems(args)
add_separator = False
for system in systems:
if add_separator:
print(self.SEPARATOR)
add_separator = True
if len(systems) > 1:
print('System: %s' % system)
print('')
system_id = self.get_system_id(system)
if not system_id:
continue
values = self.client.system.getCustomValues(self.session,
system_id)
for v in values:
print('%s = %s' % (v, values[v]))
####################
def help_system_addcustomvalue(self):
print('system_addcustomvalue: Set a custom value for a system')
print('usage: system_addcustomvalue KEY VALUE <SYSTEMS>')
print('')
print(self.HELP_SYSTEM_OPTS)
def complete_system_addcustomvalue(self, text, line, beg, end):
parts = shlex.split(line)
if line[-1] == ' ':
parts.append('')
if len(parts) == 2:
return tab_completer(self.do_custominfo_listkeys('', True), text)
elif len(parts) >= 4:
return self.tab_complete_systems(text)
return None
def do_system_addcustomvalue(self, args):
if not isinstance(args, list):
arg_parser = get_argument_parser()
(args, _options) = parse_command_arguments(args, arg_parser)
if len(args) < 3:
self.help_system_addcustomvalue()
return
key = args[0]
value = args[1]
# use the systems listed in the SSM
if re.match('ssm', args[2], re.I):
systems = self.ssm.keys()
else:
systems = self.expand_systems(args[2:])
for system in systems:
system_id = self.get_system_id(system)
if not system_id:
continue
self.client.system.setCustomValues(self.session,
system_id,
{key: value})
####################
def help_system_updatecustomvalue(self):
print('system_updatecustomvalue: Update a custom value for a system')
print('usage: system_updatecustomvalue KEY VALUE <SYSTEMS>')
print('')
print(self.HELP_SYSTEM_OPTS)
def complete_system_updatecustomvalue(self, text, line, beg, end):
parts = shlex.split(line)
if line[-1] == ' ':
parts.append('')
if len(parts) == 2:
return tab_completer(self.do_custominfo_listkeys('', True), text)
elif len(parts) >= 4:
return self.tab_complete_systems(text)
return None
def do_system_updatecustomvalue(self, args):
arg_parser = get_argument_parser()
(args, _options) = parse_command_arguments(args, arg_parser)
if len(args) < 3:
self.help_system_updatecustomvalue()
return None
return self.do_system_addcustomvalue(args)
####################
def help_system_removecustomvalues(self):
print('system_removecustomvalues: Remove a custom value for a system')
print('usage: system_removecustomvalues <SYSTEMS> <KEY ...>')
print('')
print(self.HELP_SYSTEM_OPTS)
def complete_system_removecustomvalues(self, text, line, beg, end):
parts = line.split(' ')
if len(parts) == 2:
return self.tab_complete_systems(text)
elif len(parts) == 3:
return tab_completer(self.do_custominfo_listkeys('', True),
text)
return None
def do_system_removecustomvalues(self, args):
arg_parser = get_argument_parser()
(args, _options) = parse_command_arguments(args, arg_parser)
if len(args) < 2:
self.help_system_removecustomvalues()
return
# use the systems listed in the SSM
if re.match('ssm', args[0], re.I):
systems = self.ssm.keys()
else:
systems = self.expand_systems(args)
keys = args[1:]
if not self.user_confirm('Delete these values [y/N]:'):
return
for system in systems:
system_id = self.get_system_id(system)
if not system_id:
continue
# assumed completion of this truncated loop, mirroring the other commands in
# this module: remove the selected keys via the deleteCustomValues API
self.client.system.deleteCustomValues(self.session, system_id, keys)
|
# -*- encoding: utf-8 -*-
from abjad.tools.durationtools import Duration
from abjad.tools.lilypondparsertools.LilyPondDuration import LilyPondDuration
lilypond_version = "2.17.9"
current_module = {
'$current-book': '$current-book',
'$current-bookpart': '$current-bookpart',
'$defaultheader': '$defaultheader',
'$defaultlayout': '$defaultlayout',
'$defaultmidi': '$defaultmidi',
'$defaultpaper': '$defaultpaper',
'%module-public-interface': '%module-public-interface',
'RemoveEmptyStaves': 'RemoveEmptyStaves',
'accent': {
'articulation-type': 'accent',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'acciaccatura': {
'signature': ('ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'accidentalStyle': {
'signature': ('ly:music?', 'symbol-list?',),
'type': 'ly:music-function?',
},
'add-toc-item!': 'add-toc-item!',
'addChordShape': {
'signature': ('void?', 'symbol?', 'pair?', 'string-or-pair?',),
'type': 'ly:music-function?',
},
'addInstrumentDefinition': {
'signature': ('void?', 'string?', 'list?',),
'type': 'ly:music-function?',
},
'addQuote': {
'signature': ('void?', 'string?', 'ly:music?',),
'type': 'ly:music-function?',
},
'aeolian': 'aeolian',
'afterGrace': {
'signature': ('ly:music?', 'ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'afterGraceFraction': 'afterGraceFraction',
'aikenHeads': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'aikenHeadsMinor': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'allowPageTurn': {
'signature': ('ly:music?',),
'type': 'ly:music-function?',
},
'allowVoltaHook': {
'signature': ('void?', 'string?',),
'type': 'ly:music-function?',
},
'alterBroken': {
'signature': ('ly:music?', 'symbol-list-or-symbol?', 'list?', 'symbol-list-or-music?',),
'type': 'ly:music-function?',
},
'appendToTag': {
'signature': ('ly:music?', 'symbol?', 'ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'applyContext': {
'signature': ('ly:music?', 'procedure?',),
'type': 'ly:music-function?',
},
'applyMusic': {
'signature': ('ly:music?', 'procedure?', 'ly:music?',),
'type': 'ly:music-function?',
},
'applyOutput': {
'signature': ('ly:music?', 'symbol?', 'procedure?',),
'type': 'ly:music-function?',
},
'appoggiatura': {
'signature': ('ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'arpeggio': {
'name': 'ArpeggioEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'arpeggio-event', 'event',),
},
'arpeggioArrowDown': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'arpeggioArrowUp': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'arpeggioBracket': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'arpeggioNormal': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'arpeggioParenthesis': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'arpeggioParenthesisDashed': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'assertBeamQuant': {
'signature': ('ly:music?', 'pair?', 'pair?',),
'type': 'ly:music-function?',
},
'assertBeamSlope': {
'signature': ('ly:music?', 'procedure?',),
'type': 'ly:music-function?',
},
'autoBeamOff': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'autoBeamOn': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'autochange': {
'signature': ('ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'balloonGrobText': {
'signature': ('ly:music?', 'symbol?', 'number-pair?', 'cheap-markup?',),
'type': 'ly:music-function?',
},
'balloonLengthOff': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'balloonLengthOn': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'balloonText': {
'signature': ('ly:music?', 'number-pair?', 'cheap-markup?',),
'type': 'ly:music-function?',
},
'banjo-c-tuning': 'banjo-c-tuning',
'banjo-modal-tuning': 'banjo-modal-tuning',
'banjo-open-d-tuning': 'banjo-open-d-tuning',
'banjo-open-dm-tuning': 'banjo-open-dm-tuning',
'banjo-open-g-tuning': 'banjo-open-g-tuning',
'bar': {
'signature': ('ly:music?', 'string?',),
'type': 'ly:music-function?',
},
'barNumberCheck': {
'signature': ('ly:music?', 'integer?',),
'type': 'ly:music-function?',
},
'baritone-ukulele-tuning': 'baritone-ukulele-tuning',
'bass-drop-d-tuning': 'bass-drop-d-tuning',
'bass-five-string-tuning': 'bass-five-string-tuning',
'bass-four-string-tuning': 'bass-four-string-tuning',
'bass-six-string-tuning': 'bass-six-string-tuning',
'bass-tuning': 'bass-tuning',
'bassFigureExtendersOff': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'bassFigureExtendersOn': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'bassFigureStaffAlignmentDown': {
'context-type': 'Staff',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'bassFigureStaffAlignmentNeutral': {
'context-type': 'Staff',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'bassFigureStaffAlignmentUp': {
'context-type': 'Staff',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'bendAfter': {
'signature': ('ly:event?', 'real?',),
'type': 'ly:music-function?',
},
'bigger': 1,
'blackTriangleMarkup': 'blackTriangleMarkup',
'bongos-style': 'bongos-style',
'book-bookpart-handler': 'book-bookpart-handler',
'book-music-handler': 'book-music-handler',
'book-score-handler': 'book-score-handler',
'book-text-handler': 'book-text-handler',
'bookOutputName': {
'signature': ('void?', 'string?',),
'type': 'ly:music-function?',
},
'bookOutputSuffix': {
'signature': ('void?', 'string?',),
'type': 'ly:music-function?',
},
'bookpart-music-handler': 'bookpart-music-handler',
'bookpart-score-handler': 'bookpart-score-handler',
'bookpart-text-handler': 'bookpart-text-handler',
'bracketCloseSymbol': {
'name': 'BeamEvent',
'span-direction': 1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'beam-event', 'span-event',),
},
'bracketOpenSymbol': {
'name': 'BeamEvent',
'span-direction': -1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'beam-event', 'span-event',),
},
'break': {
'name': 'LineBreakEvent',
'type': 'ly:prob?',
'types': ('general-music', 'line-break-event', 'break-event', 'event',),
},
'breakDynamicSpan': {
'name': 'BreakDynamicSpanEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'break-span-event', 'break-dynamic-span-event', 'event',),
},
'breathe': {
'signature': ('ly:music?',),
'type': 'ly:music-function?',
},
'breve': LilyPondDuration(Duration(2, 1), None),
'cadenzaOff': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'cadenzaOn': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'cello-tuning': 'cello-tuning',
'center': 0,
'chord-shape': 'chord-shape',
'chord-shape-table': 'chord-shape-table',
'chordRepeats': {
'signature': ('ly:music?', 'optional?', 'list?', 'ly:music?',),
'type': 'ly:music-function?',
},
'chordmodifiers': 'chordmodifiers',
'clef': {
'signature': ('ly:music?', 'string?',),
'type': 'ly:music-function?',
},
'coda': {
'articulation-type': 'coda',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'compoundMeter': {
'signature': ('ly:music?', 'pair?',),
'type': 'ly:music-function?',
},
'compressFullBarRests': {
'context-type': 'Score',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'congas-style': 'congas-style',
'context-mod-music-handler': 'context-mod-music-handler',
'cr': {
'name': 'CrescendoEvent',
'span-direction': -1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'span-event', 'span-dynamic-event', 'crescendo-event', 'event',),
},
'cresc': {
'name': 'CrescendoEvent',
'span-direction': -1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'span-event', 'span-dynamic-event', 'crescendo-event', 'event',),
},
'crescHairpin': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'crescTextCresc': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'crossStaff': {
'signature': ('ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'cueClef': {
'signature': ('ly:music?', 'string?',),
'type': 'ly:music-function?',
},
'cueClefUnset': {
'signature': ('ly:music?',),
'type': 'ly:music-function?',
},
'cueDuring': {
'signature': ('ly:music?', 'string?', 'ly:dir?', 'ly:music?',),
'type': 'ly:music-function?',
},
'cueDuringWithClef': {
'signature': ('ly:music?', 'string?', 'ly:dir?', 'string?', 'ly:music?',),
'type': 'ly:music-function?',
},
'dashBar': {
'alias': 'staccatissimo',
'type': 'alias',
},
'dashDash': {
'alias': 'tenuto',
'type': 'alias',
},
'dashDot': {
'alias': 'staccato',
'type': 'alias',
},
'dashHat': {
'alias': 'marcato',
'type': 'alias',
},
'dashLarger': {
'alias': 'accent',
'type': 'alias',
},
'dashPlus': {
'alias': 'stopped',
'type': 'alias',
},
'dashUnderscore': {
'alias': 'portato',
'type': 'alias',
},
'deadNote': {
'signature': ('ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'deadNotesOff': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'deadNotesOn': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'decr': {
'name': 'DecrescendoEvent',
'span-direction': -1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'span-event', 'span-dynamic-event', 'decrescendo-event', 'event',),
},
'decresc': {
'name': 'DecrescendoEvent',
'span-direction': -1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'span-event', 'span-dynamic-event', 'decrescendo-event', 'event',),
},
'default-fret-table': 'default-fret-table',
'defaultNoteHeads': {
'signature': ('ly:music?',),
'type': 'ly:music-function?',
},
'defaultStringTunings': 'defaultStringTunings',
'defaultTimeSignature': {
'context-type': 'Staff',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'defineBarLine': {
'signature': ('void?', 'string?', 'list?',),
'type': 'ly:music-function?',
},
'deprecatedcresc': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'deprecateddim': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'deprecatedendcresc': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'deprecatedenddim': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'dim': {
'name': 'DecrescendoEvent',
'span-direction': -1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'span-event', 'span-dynamic-event', 'decrescendo-event', 'event',),
},
'dimHairpin': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'dimTextDecr': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'dimTextDecresc': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'dimTextDim': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'displayLilyMusic': {
'signature': ('ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'displayMusic': {
'signature': ('ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'dorian': 'dorian',
'dotsDown': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'dotsNeutral': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'dotsUp': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'double-bass-tuning': 'double-bass-tuning',
'down': -1,
'downbow': {
'articulation-type': 'downbow',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'downmordent': {
'articulation-type': 'downmordent',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'downprall': {
'articulation-type': 'downprall',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'drumPitchNames': 'drumPitchNames',
'drums-style': 'drums-style',
'dynamicDown': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'dynamicNeutral': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'dynamicUp': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'easyHeadsOff': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'easyHeadsOn': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'endSpanners': {
'signature': ('ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'endcr': {
'name': 'CrescendoEvent',
'span-direction': 1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'span-event', 'span-dynamic-event', 'crescendo-event', 'event',),
},
'endcresc': {
'name': 'CrescendoEvent',
'span-direction': 1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'span-event', 'span-dynamic-event', 'crescendo-event', 'event',),
},
'enddecr': {
'name': 'DecrescendoEvent',
'span-direction': 1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'span-event', 'span-dynamic-event', 'decrescendo-event', 'event',),
},
'enddecresc': {
'name': 'DecrescendoEvent',
'span-direction': 1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'span-event', 'span-dynamic-event', 'decrescendo-event', 'event',),
},
'enddim': {
'name': 'DecrescendoEvent',
'span-direction': 1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'span-event', 'span-dynamic-event', 'decrescendo-event', 'event',),
},
'endincipit': {
'context-type': 'Staff',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'episemFinis': {
'name': 'EpisemaEvent',
'span-direction': 1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'span-event', 'event', 'episema-event',),
},
'episemInitium': {
'name': 'EpisemaEvent',
'span-direction': -1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'span-event', 'event', 'episema-event',),
},
'escapedBiggerSymbol': {
'name': 'DecrescendoEvent',
'span-direction': -1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'span-event', 'span-dynamic-event', 'decrescendo-event', 'event',),
},
'escapedExclamationSymbol': {
'name': 'CrescendoEvent',
'span-direction': 1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'span-event', 'span-dynamic-event', 'crescendo-event', 'event',),
},
'escapedParenthesisCloseSymbol': {
'name': 'PhrasingSlurEvent',
'span-direction': 1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'span-event', 'event', 'phrasing-slur-event',),
},
'escapedParenthesisOpenSymbol': {
'name': 'PhrasingSlurEvent',
'span-direction': -1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'span-event', 'event', 'phrasing-slur-event',),
},
'escapedSmallerSymbol': {
'name': 'CrescendoEvent',
'span-direction': -1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'span-event', 'span-dynamic-event', 'crescendo-event', 'event',),
},
'espressivo': {
'articulation-type': 'espressivo',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'eventChords': {
'signature': ('ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'expandFullBarRests': {
'context-type': 'Score',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'expect-error': 'expect-error',
'f': {
'name': 'AbsoluteDynamicEvent',
'text': 'f',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'dynamic-event', 'absolute-dynamic-event',),
},
'featherDurations': {
'signature': ('ly:music?', 'ly:moment?', 'ly:music?',),
'type': 'ly:music-function?',
},
'fermata': {
'articulation-type': 'fermata',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'fermataMarkup': {
'name': 'MultiMeasureTextEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'multi-measure-text-event',),
},
'ff': {
'name': 'AbsoluteDynamicEvent',
'text': 'ff',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'dynamic-event', 'absolute-dynamic-event',),
},
'fff': {
'name': 'AbsoluteDynamicEvent',
'text': 'fff',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'dynamic-event', 'absolute-dynamic-event',),
},
'ffff': {
'name': 'AbsoluteDynamicEvent',
'text': 'ffff',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'dynamic-event', 'absolute-dynamic-event',),
},
'fffff': {
'name': 'AbsoluteDynamicEvent',
'text': 'fffff',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'dynamic-event', 'absolute-dynamic-event',),
},
'flageolet': {
'articulation-type': 'flageolet',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'footnote': {
'signature': ('ly:music?', 'optional?', 'cheap-markup?', 'number-pair?', 'cheap-markup?', 'symbol-list-or-music?',),
'type': 'ly:music-function?',
},
'four-string-banjo': 'four-string-banjo',
'fp': {
'name': 'AbsoluteDynamicEvent',
'text': 'fp',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'dynamic-event', 'absolute-dynamic-event',),
},
'frenchChords': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'fullJazzExceptions': 'fullJazzExceptions',
'funkHeads': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'funkHeadsMinor': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'fz': {
'name': 'AbsoluteDynamicEvent',
'text': 'fz',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'dynamic-event', 'absolute-dynamic-event',),
},
'germanChords': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'glissando': {
'name': 'GlissandoEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'glissando-event', 'event',),
},
'grace': {
'signature': ('ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'grobdescriptions': {
'signature': ('scheme?', 'list?',),
'type': 'ly:music-function?',
},
'guitar-asus4-tuning': 'guitar-asus4-tuning',
'guitar-dadgad-tuning': 'guitar-dadgad-tuning',
'guitar-drop-c-tuning': 'guitar-drop-c-tuning',
'guitar-drop-d-tuning': 'guitar-drop-d-tuning',
'guitar-lute-tuning': 'guitar-lute-tuning',
'guitar-open-d-tuning': 'guitar-open-d-tuning',
'guitar-open-g-tuning': 'guitar-open-g-tuning',
'guitar-seven-string-tuning': 'guitar-seven-string-tuning',
'guitar-tuning': 'guitar-tuning',
'halfopen': {
'articulation-type': 'halfopen',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'harmonic': {
'name': 'HarmonicEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'harmonic-event',),
},
'harmonicByFret': {
'signature': ('ly:music?', 'number?', 'ly:music?',),
'type': 'ly:music-function?',
},
'harmonicByRatio': {
'signature': ('ly:music?', 'number?', 'ly:music?',),
'type': 'ly:music-function?',
},
'harmonicNote': {
'signature': ('ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'harmonicsOff': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'harmonicsOn': {
'signature': ('ly:music?',),
'type': 'ly:music-function?',
},
'hide': {
'signature': ('ly:music?', 'symbol-list-or-music?',),
'type': 'ly:music-function?',
},
'hideNotes': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'hideSplitTiedTabNotes': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'hideStaffSwitch': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'huge': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'ignatzekExceptionMusic': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'ignatzekExceptions': 'ignatzekExceptions',
'improvisationOff': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'improvisationOn': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'inStaffSegno': {
'signature': ('ly:music?',),
'type': 'ly:music-function?',
},
'input-file-name': None,
'instrument-definitions': 'instrument-definitions',
'instrumentSwitch': {
'signature': ('ly:music?', 'string?',),
'type': 'ly:music-function?',
},
'inversion': {
'signature': ('ly:music?', 'ly:pitch?', 'ly:pitch?', 'ly:music?',),
'type': 'ly:music-function?',
},
'ionian': 'ionian',
'italianChords': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'keepWithTag': {
'signature': ('ly:music?', 'symbol-list-or-symbol?', 'ly:music?',),
'type': 'ly:music-function?',
},
'key': {
'signature': ('ly:music?', 'optional?', 'ly:pitch?', 'optional?', 'list?',),
'type': 'ly:music-function?',
},
'kievanOff': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'kievanOn': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'killCues': {
'signature': ('ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'label': {
'signature': ('ly:music?', 'symbol?',),
'type': 'ly:music-function?',
},
'laissezVibrer': {
'name': 'LaissezVibrerEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'laissez-vibrer-event',),
},
'language': {
'signature': ('void?', 'string?',),
'type': 'ly:music-function?',
},
'languageRestore': {
'signature': ('void?',),
'type': 'ly:music-function?',
},
'languageSaveAndChange': {
'signature': ('void?', 'string?',),
'type': 'ly:music-function?',
},
'large': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'left': -1,
'lheel': {
'articulation-type': 'lheel',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'lineprall': {
'articulation-type': 'lineprall',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'locrian': 'locrian',
'longa': LilyPondDuration(Duration(4, 1), None),
'longfermata': {
'articulation-type': 'longfermata',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'ltoe': {
'articulation-type': 'ltoe',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'lydian': 'lydian',
'major': 'major',
'make-dynamic-script': 'make-dynamic-script',
'make-fretboard-table': 'make-fretboard-table',
'make-simple-dash-definition': 'make-simple-dash-definition',
'make-table-of-contents-markup-list': 'make-table-of-contents-markup-list',
'makeClusters': {
'signature': ('ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'makeDefaultStringTuning': {
'signature': ('void?', 'symbol?', 'list?',),
'type': 'ly:music-function?',
},
'mandolin-tuning': 'mandolin-tuning',
'marcato': {
'articulation-type': 'marcato',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'mark': {
'signature': ('ly:music?', 'optional?', 'scheme?',),
'type': 'ly:music-function?',
},
'maxima': LilyPondDuration(Duration(8, 1), None),
'melisma': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'melismaEnd': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'mergeDifferentlyDottedOff': {
'context-type': 'Staff',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'mergeDifferentlyDottedOn': {
'context-type': 'Staff',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'mergeDifferentlyHeadedOff': {
'context-type': 'Staff',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'mergeDifferentlyHeadedOn': {
'context-type': 'Staff',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'mf': {
'name': 'AbsoluteDynamicEvent',
'text': 'mf',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'dynamic-event', 'absolute-dynamic-event',),
},
'midiDrumPitches': 'midiDrumPitches',
'minor': 'minor',
'mixolydian': 'mixolydian',
'modalInversion': {
'signature': ('ly:music?', 'ly:pitch?', 'ly:pitch?', 'ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'modalTranspose': {
'signature': ('ly:music?', 'ly:pitch?', 'ly:pitch?', 'ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'mordent': {
'articulation-type': 'mordent',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'mp': {
'name': 'AbsoluteDynamicEvent',
'text': 'mp',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'dynamic-event', 'absolute-dynamic-event',),
},
'musicMap': {
'signature': ('ly:music?', 'procedure?', 'ly:music?',),
'type': 'ly:music-function?',
},
'musicQuotes': 'musicQuotes',
'newSpacingSection': {
'name': 'EventChord',
'type': 'ly:prob?',
'types': ('general-music', 'event-chord', 'simultaneous-music',),
},
'noBeam': {
'name': 'BeamForbidEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'beam-forbid-event',),
},
'noBreak': {
'name': 'LineBreakEvent',
'type': 'ly:prob?',
'types': ('general-music', 'line-break-event', 'break-event', 'event',),
},
'noPageBreak': {
'signature': ('ly:music?',),
'type': 'ly:music-function?',
},
'noPageTurn': {
'signature': ('ly:music?',),
'type': 'ly:music-function?',
},
'normalsize': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'numericTimeSignature': {
'context-type': 'Staff',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'octaveCheck': {
'signature': ('ly:music?', 'ly:pitch?',),
'type': 'ly:music-function?',
},
'omit': {
'signature': ('ly:music?', 'symbol-list-or-music?',),
'type': 'ly:music-function?',
},
'once': {
'signature': ('ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'oneVoice': {
'context-type': 'Voice',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'open': {
'articulation-type': 'open',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'ottava': {
'signature': ('ly:music?', 'integer?',),
'type': 'ly:music-function?',
},
'output-def-music-handler': 'output-def-music-handler',
'output-empty-score-list': 'output-empty-score-list',
'output-suffix': 'output-suffix',
'overrideProperty': {
'signature': ('ly:music?', 'symbol-list?', 'scheme?',),
'type': 'ly:music-function?',
},
'overrideTimeSignatureSettings': {
'signature': ('ly:music?', 'pair?', 'pair?', 'cheap-list?', 'cheap-list?',),
'type': 'ly:music-function?',
},
'p': {
'name': 'AbsoluteDynamicEvent',
'text': 'p',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'dynamic-event', 'absolute-dynamic-event',),
},
'pageBreak': {
'signature': ('ly:music?',),
'type': 'ly:music-function?',
},
'pageTurn': {
'signature': ('ly:music?',),
'type': 'ly:music-function?',
},
'palmMute': {
'signature': ('ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'palmMuteOff': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'palmMuteOn': {
'signature': ('ly:music?',),
'type': 'ly:music-function?',
},
'parallelMusic': {
'signature': ('void?', 'list?', 'ly:music?',),
'type': 'ly:music-function?',
},
'parenthesisCloseSymbol': {
'name': 'SlurEvent',
'span-direction': 1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'span-event', 'event', 'slur-event',),
},
'parenthesisOpenSymbol': {
'name': 'SlurEvent',
'span-direction': -1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'span-event', 'event', 'slur-event',),
},
'parenthesize': {
'signature': ('ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'parser': 'parser',
'partCombineListener': 'partCombineListener',
'partcombine': {
'signature': ('ly:music?', 'ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'partcombineApart': {
'name': 'EventChord',
'type': 'ly:prob?',
'types': ('general-music', 'event-chord', 'simultaneous-music',),
},
'partcombineApartOnce': {
'name': 'EventChord',
'type': 'ly:prob?',
'types': ('general-music', 'event-chord', 'simultaneous-music',),
},
'partcombineAutomatic': {
'name': 'EventChord',
'type': 'ly:prob?',
'types': ('general-music', 'event-chord', 'simultaneous-music',),
},
'partcombineAutomaticOnce': {
'name': 'EventChord',
'type': 'ly:prob?',
'types': ('general-music', 'event-chord', 'simultaneous-music',),
},
'partcombineChords': {
'name': 'EventChord',
'type': 'ly:prob?',
'types': ('general-music', 'event-chord', 'simultaneous-music',),
},
'partcombineChordsOnce': {
'name': 'EventChord',
'type': 'ly:prob?',
'types': ('general-music', 'event-chord', 'simultaneous-music',),
},
'partcombineDown': {
'signature': ('ly:music?', 'ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'partcombineForce': {
'signature': ('ly:music?', 'symbol-or-boolean?', 'boolean?',),
'type': 'ly:music-function?',
},
'partcombineSoloI': {
'name': 'EventChord',
'type': 'ly:prob?',
'types': ('general-music', 'event-chord', 'simultaneous-music',),
},
'partcombineSoloII': {
'name': 'EventChord',
'type': 'ly:prob?',
'types': ('general-music', 'event-chord', 'simultaneous-music',),
},
'partcombineSoloIIOnce': {
'name': 'EventChord',
'type': 'ly:prob?',
'types': ('general-music', 'event-chord', 'simultaneous-music',),
},
'partcombineSoloIOnce': {
'name': 'EventChord',
'type': 'ly:prob?',
'types': ('general-music', 'event-chord', 'simultaneous-music',),
},
'partcombineUnisono': {
'name': 'EventChord',
'type': 'ly:prob?',
'types': ('general-music', 'event-chord', 'simultaneous-music',),
},
'partcombineUnisonoOnce': {
'name': 'EventChord',
'type': 'ly:prob?',
'types': ('general-music', 'event-chord', 'simultaneous-music',),
},
'partcombineUp': {
'signature': ('ly:music?', 'ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'partial': {
'signature': ('ly:music?', 'ly:duration?',),
'type': 'ly:music-function?',
},
'partialJazzExceptions': 'partialJazzExceptions',
'partialJazzMusic': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'percussion-style': 'percussion-style',
'phrasingSlurDashPattern': {
'signature': ('ly:music?', 'number?', 'number?',),
'type': 'ly:music-function?',
},
'phrasingSlurDashed': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'phrasingSlurDotted': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'phrasingSlurDown': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'phrasingSlurHalfDashed': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'phrasingSlurHalfSolid': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'phrasingSlurNeutral': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'phrasingSlurSolid': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'phrasingSlurUp': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'phrygian': 'phrygian',
'pipeSymbol': {
'name': 'BarCheck',
'type': 'ly:prob?',
'types': ('general-music', 'bar-check',),
},
'pitchedTrill': {
'signature': ('ly:music?', 'ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'pointAndClickOff': {
'signature': ('void?',),
'type': 'ly:music-function?',
},
'pointAndClickOn': {
'signature': ('void?',),
'type': 'ly:music-function?',
},
'pointAndClickTypes': {
'signature': ('void?', 'symbol-list-or-symbol?',),
'type': 'ly:music-function?',
},
'portato': {
'articulation-type': 'portato',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'powerChordExceptions': 'powerChordExceptions',
'powerChordSymbol': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'powerChords': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'pp': {
'name': 'AbsoluteDynamicEvent',
'text': 'pp',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'dynamic-event', 'absolute-dynamic-event',),
},
'ppp': {
'name': 'AbsoluteDynamicEvent',
'text': 'ppp',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'dynamic-event', 'absolute-dynamic-event',),
},
'pppp': {
'name': 'AbsoluteDynamicEvent',
'text': 'pppp',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'dynamic-event', 'absolute-dynamic-event',),
},
'ppppp': {
'name': 'AbsoluteDynamicEvent',
'text': 'ppppp',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'dynamic-event', 'absolute-dynamic-event',),
},
'prall': {
'articulation-type': 'prall',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'pralldown': {
'articulation-type': 'pralldown',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'prallmordent': {
'articulation-type': 'prallmordent',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'prallprall': {
'articulation-type': 'prallprall',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'prallup': {
'articulation-type': 'prallup',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'predefinedFretboardsOff': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'predefinedFretboardsOn': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'pushToTag': {
'signature': ('ly:music?', 'symbol?', 'ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'quoteDuring': {
'signature': ('ly:music?', 'string?', 'ly:music?',),
'type': 'ly:music-function?',
},
'relative': {
'signature': ('ly:music?', 'optional?', 'ly:pitch?', 'ly:music?',),
'type': 'ly:music-function?',
},
'removeWithTag': {
'signature': ('ly:music?', 'symbol-list-or-symbol?', 'ly:music?',),
'type': 'ly:music-function?',
},
'repeatTie': {
'name': 'RepeatTieEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'repeat-tie-event',),
},
'resetRelativeOctave': {
'signature': ('ly:music?', 'ly:pitch?',),
'type': 'ly:music-function?',
},
'retrograde': {
'signature': ('ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'reverseturn': {
'articulation-type': 'reverseturn',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'revertTimeSignatureSettings': {
'signature': ('ly:music?', 'pair?',),
'type': 'ly:music-function?',
},
'rfz': {
'name': 'AbsoluteDynamicEvent',
'text': 'rfz',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'dynamic-event', 'absolute-dynamic-event',),
},
'rheel': {
'articulation-type': 'rheel',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'right': 1,
'rightHandFinger': {
'signature': ('ly:event?', 'number-or-string?',),
'type': 'ly:music-function?',
},
'rtoe': {
'articulation-type': 'rtoe',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'sacredHarpHeads': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'sacredHarpHeadsMinor': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'scaleDurations': {
'signature': ('ly:music?', 'fraction?', 'ly:music?',),
'type': 'ly:music-function?',
},
'segno': {
'articulation-type': 'segno',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'semiGermanChords': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'setDefaultDurationToQuarter': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'settingsFrom': {
'signature': ('scheme?', 'optional?', 'symbol?', 'ly:music?',),
'type': 'ly:music-function?',
},
'sf': {
'name': 'AbsoluteDynamicEvent',
'text': 'sf',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'dynamic-event', 'absolute-dynamic-event',),
},
'sff': {
'name': 'AbsoluteDynamicEvent',
'text': 'sff',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'dynamic-event', 'absolute-dynamic-event',),
},
'sfp': {
'name': 'AbsoluteDynamicEvent',
'text': 'sfp',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'dynamic-event', 'absolute-dynamic-event',),
},
'sfz': {
'name': 'AbsoluteDynamicEvent',
'text': 'sfz',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'dynamic-event', 'absolute-dynamic-event',),
},
'shape': {
'signature': ('ly:music?', 'list?', 'symbol-list-or-music?',),
'type': 'ly:music-function?',
},
'shiftDurations': {
'signature': ('ly:music?', 'integer?', 'integer?', 'ly:music?',),
'type': 'ly:music-function?',
},
'shiftOff': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'shiftOn': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'shiftOnn': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'shiftOnnn': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'shortfermata': {
'articulation-type': 'shortfermata',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'showSplitTiedTabNotes': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'showStaffSwitch': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'signumcongruentiae': {
'articulation-type': 'signumcongruentiae',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'single': {
'signature': ('ly:music?', 'ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'skip': {
'signature': ('ly:music?', 'ly:duration?',),
'type': 'ly:music-function?',
},
'slashedGrace': {
'signature': ('ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'slurDashPattern': {
'signature': ('ly:music?', 'number?', 'number?',),
'type': 'ly:music-function?',
},
'slurDashed': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'slurDotted': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'slurDown': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'slurHalfDashed': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'slurHalfSolid': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'slurNeutral': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'slurSolid': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'slurUp': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'small': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'smaller': -1,
'snappizzicato': {
'articulation-type': 'snappizzicato',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'sostenutoOff': {
'name': 'SostenutoEvent',
'span-direction': 1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'pedal-event', 'sostenuto-event',),
},
'sostenutoOn': {
'name': 'SostenutoEvent',
'span-direction': -1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'pedal-event', 'sostenuto-event',),
},
'southernHarmonyHeads': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'southernHarmonyHeadsMinor': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'sp': {
'name': 'AbsoluteDynamicEvent',
'text': 'sp',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'dynamic-event', 'absolute-dynamic-event',),
},
'spacingTweaks': {
'signature': ('ly:music?', 'list?',),
'type': 'ly:music-function?',
},
'spp': {
'name': 'AbsoluteDynamicEvent',
'text': 'spp',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'dynamic-event', 'absolute-dynamic-event',),
},
'staccatissimo': {
'articulation-type': 'staccatissimo',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'staccato': {
'articulation-type': 'staccato',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'start': -1,
'startAcciaccaturaMusic': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'startAppoggiaturaMusic': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'startGraceMusic': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'startGraceSlur': {
'name': 'SlurEvent',
'span-direction': -1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'span-event', 'event', 'slur-event',),
},
'startGroup': {
'name': 'NoteGroupingEvent',
'span-direction': -1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'note-grouping-event',),
},
'startMeasureCount': {
'name': 'MeasureCounterEvent',
'span-direction': -1,
'type': 'ly:prob?',
'types': ('general-music', 'measure-counter-event', 'span-event', 'event',),
},
'startSlashedGraceMusic': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'startStaff': {
'name': 'StaffSpanEvent',
'span-direction': -1,
'type': 'ly:prob?',
'types': ('general-music', 'event', 'span-event', 'staff-span-event',),
},
'startTextSpan': {
'name': 'TextSpanEvent',
'span-direction': -1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'span-event', 'event', 'text-span-event',),
},
'startTrillSpan': {
'name': 'TrillSpanEvent',
'span-direction': -1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'span-event', 'event', 'trill-span-event',),
},
'stemDown': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'stemNeutral': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'stemUp': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'stop': 1,
'stopAcciaccaturaMusic': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'stopAppoggiaturaMusic': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'stopGraceMusic': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'stopGraceSlur': {
'name': 'SlurEvent',
'span-direction': 1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'span-event', 'event', 'slur-event',),
},
'stopGroup': {
'name': 'NoteGroupingEvent',
'span-direction': 1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'note-grouping-event',),
},
'stopMeasureCount': {
'name': 'MeasureCounterEvent',
'span-direction': 1,
'type': 'ly:prob?',
'types': ('general-music', 'measure-counter-event', 'span-event', 'event',),
},
'stopSlashedGraceMusic': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'stopStaff': {
'name': 'StaffSpanEvent',
'span-direction': 1,
'type': 'ly:prob?',
'types': ('general-music', 'event', 'span-event', 'staff-span-event',),
},
'stopTextSpan': {
'name': 'TextSpanEvent',
'span-direction': 1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'span-event', 'event', 'text-span-event',),
},
'stopTrillSpan': {
'name': 'TrillSpanEvent',
'span-direction': 1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'span-event', 'event', 'trill-span-event',),
},
'stopped': {
'articulation-type': 'stopped',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'storePredefinedDiagram': {
'signature': ('void?', 'hash-table?', 'ly:music?', 'pair?', 'string-or-pair?',),
'type': 'ly:music-function?',
},
'stringTuning': {
'signature': ('scheme?', 'ly:music?',),
'type': 'ly:music-function?',
},
'styledNoteHeads': {
'signature': ('ly:music?', 'symbol?', 'symbol-list-or-symbol?', 'ly:music?',),
'type': 'ly:music-function?',
},
'sustainOff': {
'name': 'SustainEvent',
'span-direction': 1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'pedal-event', 'sustain-event',),
},
'sustainOn': {
'name': 'SustainEvent',
'span-direction': -1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'pedal-event', 'sustain-event',),
},
'tabChordRepeats': {
'signature': ('ly:music?', 'optional?', 'list?', 'ly:music?',),
'type': 'ly:music-function?',
},
'tabChordRepetition': {
'signature': ('void?',),
'type': 'ly:music-function?',
},
'tabFullNotation': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'table-of-contents-markup-list': 'table-of-contents-markup-list',
'tag': {
'signature': ('ly:music?', 'symbol-list-or-symbol?', 'ly:music?',),
'type': 'ly:music-function?',
},
'teeny': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'temporary': {
'signature': ('ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'tenor-ukulele-tuning': 'tenor-ukulele-tuning',
'tenuto': {
'articulation-type': 'tenuto',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'textLengthOff': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'textLengthOn': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'textSpannerDown': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'textSpannerNeutral': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'textSpannerUp': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'thumb': {
'articulation-type': 'thumb',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'tieDashPattern': {
'signature': ('ly:music?', 'number?', 'number?',),
'type': 'ly:music-function?',
},
'tieDashed': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'tieDotted': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'tieDown': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'tieHalfDashed': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'tieHalfSolid': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'tieNeutral': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'tieSolid': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'tieUp': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'tildeSymbol': {
'name': 'TieEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'tie-event', 'event',),
},
'timbales-style': 'timbales-style',
'time': {
'signature': ('ly:music?', 'optional?', 'number-list?', 'fraction?',),
'type': 'ly:music-function?',
},
'times': {
'signature': ('ly:music?', 'fraction?', 'ly:music?',),
'type': 'ly:music-function?',
},
'tiny': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'toc-items': 'toc-items',
'tocItem': {
'signature': ('ly:music?', 'cheap-markup?',),
'type': 'ly:music-function?',
},
'tocItemWithDotsMarkup': 'tocItemWithDotsMarkup',
'toplevel-book-handler': 'toplevel-book-handler',
'toplevel-bookpart-handler': 'toplevel-bookpart-handler',
'toplevel-bookparts': 'toplevel-bookparts',
'toplevel-music-handler': 'toplevel-music-handler',
'toplevel-score-handler': 'toplevel-score-handler',
'toplevel-scores': 'toplevel-scores',
'toplevel-text-handler': 'toplevel-text-handler',
'transpose': {
'signature': ('ly:music?', 'ly:pitch?', 'ly:pitch?', 'ly:music?',),
'type': 'ly:music-function?',
},
'transposedCueDuring': {
'signature': ('ly:music?', 'string?', 'ly:dir?', 'ly:pitch?', 'ly:music?',),
'type': 'ly:music-function?',
},
'transposition': {
'signature': ('ly:music?', 'ly:pitch?',),
'type': 'ly:music-function?',
},
'treCorde': {
'name': 'UnaCordaEvent',
'span-direction': 1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'pedal-event', 'una-corda-event',),
},
'trill': {
'articulation-type': 'trill',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'tupletDown': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'tupletNeutral': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'tupletUp': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'turn': {
'articulation-type': 'turn',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'tweak': {
'signature': ('ly:music?', 'symbol-list-or-symbol?', 'scheme?', 'symbol-list-or-music?',),
'type': 'ly:music-function?',
},
'ukulele-d-tuning': 'ukulele-d-tuning',
'ukulele-tuning': 'ukulele-tuning',
'unHideNotes': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'unaCorda': {
'name': 'UnaCordaEvent',
'span-direction': -1,
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'pedal-event', 'una-corda-event',),
},
'undo': {
'signature': ('ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'unfoldRepeats': {
'signature': ('ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'up': 1,
'upbow': {
'articulation-type': 'upbow',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'upmordent': {
'articulation-type': 'upmordent',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'upprall': {
'articulation-type': 'upprall',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'varcoda': {
'articulation-type': 'varcoda',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'version-seen': 'version-seen',
'verylongfermata': {
'articulation-type': 'verylongfermata',
'name': 'ArticulationEvent',
'type': 'ly:prob?',
'types': ('general-music', 'post-event', 'event', 'articulation-event', 'script-event',),
},
'viola-tuning': 'viola-tuning',
'violin-tuning': 'violin-tuning',
'voiceFour': {
'context-type': 'Voice',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'voiceFourStyle': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'voiceNeutralStyle': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'voiceOne': {
'context-type': 'Voice',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'voiceOneStyle': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'voiceThree': {
'context-type': 'Voice',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'voiceThreeStyle': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'voiceTwo': {
'context-type': 'Voice',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'voiceTwoStyle': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
'types': ('general-music', 'sequential-music',),
},
'void': {
'signature': ('void?', 'scheme?',),
'type': 'ly:music-function?',
},
'walkerHeads': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'walkerHeadsMinor': {
'context-type': 'Bottom',
'name': 'ContextSpeccedMusic',
'type': 'ly:prob?',
'types': ('context-specification', 'general-music', 'music-wrapper-music',),
},
'whiteTriangleMarkup': 'whiteTriangleMarkup',
'withMusicProperty': {
'signature': ('ly:music?', 'symbol?', 'scheme?', 'ly:music?',),
'type': 'ly:music-function?',
},
'xNote': {
'signature': ('ly:music?', 'ly:music?',),
'type': 'ly:music-function?',
},
'xNotesOff': {
'name': 'SequentialMusic',
'type': 'ly:prob?',
        'types': ('general-music', 'sequential-music',),

# -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
import urllib2
from HTMLParser import HTMLParser
import re
import base64
import os
import sys
import Queue
import threading
import signal
from invenio.config import (CFG_CACHEDIR,
CFG_HEPDATA_URL,
CFG_HEPDATA_PLOTSIZE,
CFG_LOGDIR,
CFG_TMPSHAREDDIR,
CFG_HEPDATA_THREADS_NUM,
CFG_HEPDATA_INDEX,
CFG_HEPDATA_FIELD,
CFG_SITE_RECORD,
CFG_SITE_SECURE_URL)
from invenio.utils.json import json
from datetime import datetime
import time
from invenio.legacy import bibrecord
from invenio.base.wrappers import lazy_import
search_engine = lazy_import('invenio.legacy.search_engine')
import cPickle
# imports related to the harvesting daemon
from invenio.legacy.bibsched.bibtask import task_init, write_message, \
task_set_option, task_has_option, task_get_option, \
task_low_level_submission, task_update_progress, \
task_read_status, task_sleep_now_if_required
# helper functions
def get_record_val(recid, field, ind1 = " ", ind2 = " ", sfcode = "a"):
if not recid:
return ""
rec = search_engine.get_record(recid)
if not rec:
return ""
fs = bibrecord.record_get_field_instances(rec, field, ind1 = ind1,
ind2 = ind2)
if fs:
sfs = bibrecord.field_get_subfield_values(fs[0], sfcode)
if sfs:
return sfs[0]
return ""
def get_record_collaboration(recid):
""" Retrieve a collaboration of a given record"""
return get_record_val(recid, "710", sfcode = "g")
def get_record_arxivid(recid):
"""Retrieve an arxiv identifier from a record of a given number"""
return get_record_val(recid, "037", sfcode = "a")
# Mapping from HEPData format labels to the URL suffixes of the corresponding data files
ACCEPTED_FORMATS = {
"plain text" : "plain.txt",
"AIDA" : "aida",
"PYROOT": "pyroot.py",
"YODA" : "yoda",
"ROOT" : "root",
"mpl" : "mpl",
"jhepwork" : "jhepwork.py"
}
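# The suffixes above are matched against the URLs of links found on HEPData pages;
# a link ending in one of them is recorded together with its format label.  A
# minimal sketch of the check performed later in the parsers (illustrative only;
# "link" and "additional_files" stand for the parser-local variables):
#
#   for label, suffix in ACCEPTED_FORMATS.items():
#       if link.endswith(suffix):
#           additional_files.append([link, label])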
def download_with_retry(data_url):
last_e = None
sleeptime = 2
for retry_num in xrange(5):
try:
f = urllib2.urlopen(data_url)
content = f.read()
return content
except Exception, e:
last_e = e
time.sleep(sleeptime)
sleeptime = sleeptime * 2
raise Exception("Failed to download url. Last error code: %s " %( last_e.code, ))
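# Illustrative use of the retry helper (a sketch, not part of the module flow):
#
#   content = download_with_retry(get_hepdata_allids_url())
#
# The helper waits 2, 4, 8, ... seconds between attempts and gives up with an
# exception after five consecutive failures.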
class Paper(object):
def __init__(self):
self.datasets = []
self.comment = ""
self.additional_files = []
self.systematics = ""
self.additional_data_links = []
def __repr__(self):
return ("<Paper object comment=%s, additional_files=%s, " + \
"systematics=%s, additional_data_links=%s>") % \
(repr(self.comment), repr(self.additional_files),
repr(self.systematics), repr(self.additional_data_links))
@staticmethod
def create_from_record(rec):
"""Create a paper object from the record"""
paper = Paper()
# reading additional data links
fs = bibrecord.record_get_field_instances(rec, "856", ind1="4",
ind2=" ")
paper.additional_data_links = []
if fs:
for f in fs:
fsf = bibrecord.field_get_subfield_values(f, "3")
if fsf and fsf[0] == "ADDITIONAL HEPDATA":
fsf_href = bibrecord.field_get_subfield_values(f, "u")
fsf_desc = bibrecord.field_get_subfield_values(f, "y")
if fsf_href and fsf_desc:
paper.additional_data_links.append({
"href" : fsf_href[0],
"description" : fsf_desc[0]})
# reading the comment
fs = bibrecord.record_get_field_instances(rec, "520", ind1 = " ", ind2= " ")
if fs:
for f in fs:
sfs = bibrecord.field_get_subfield_values(f, "9")
if sfs and sfs[0].strip() == "HEPDATA":
sfs = bibrecord.field_get_subfield_values(f, "h")
if sfs:
paper.comment = sfs[0].strip()
return paper
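    # Note on the MARC fields read by create_from_record() above (summary only):
    #   856 4_  $3 "ADDITIONAL HEPDATA", $u link, $y description -> additional_data_links
    #   520 __  $9 "HEPDATA", $h text                            -> comment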
def get_diff_marcxml(self, rec2):
"""Returns a code that will transform record passed as
an argument into the current one.
If there are no changes, the method returns None
"""
outrec = {}
# comparing links to external data
correct_links = bibrecord.record_get_field_instances( \
self.generate_additional_datalinks(), "856", ind1 = "4", ind2 = " ")
existing_links = filter( \
lambda field: bibrecord.field_get_subfield_values(field, "3") and \
bibrecord.field_get_subfield_values(field, "3")[0].strip() == \
"ADDITIONAL HEPDATA" ,
bibrecord.record_get_field_instances(rec2, "856", ind1="4",
ind2 = " "))
# now comparing correct with existing - first we have to sort !
# sorting alphabetically !
fgsv = bibrecord.field_get_subfield_values
def links_comparer(link1, link2):
# first try to compare on the description
sfs1 = fgsv(link1, "y")
sfs2 = fgsv(link2, "y")
if sfs1 and sfs2:
if sfs1[0] > sfs2[0]:
return True
if sfs1[0] < sfs2[0]:
return False
else:
if sfs1 and not sfs2:
return True
if (not sfs1) and sfs2:
return False
# if failed, compare on the link. In correct situations
# we should not get here
sfs1 = fgsv(link1, "u")
sfs2 = fgsv(link2, "u")
if sfs1 and sfs2:
return sfs1[0]>sfs2[0]
else:
if sfs1 and not sfs2:
return True
if (not sfs1) and sfs2:
return False
            return False # finally they are equal. We should never get here
# in the case of well-formed MARC entries -
# the world is not perfect and we will get here for errors in MARC
correct_links.sort(cmp = links_comparer)
existing_links.sort(cmp = links_comparer)
cmp2 = lambda link1, link2: fgsv(link1, "y") == fgsv(link2, "y") and \
fgsv(link1, "u") == fgsv(link2, "u")
have_to_correct = not reduce( \
lambda prev, links: prev and cmp2(links[0], links[1]),
zip(correct_links, existing_links),
            len(correct_links) == len(existing_links))
correct_links.sort()
if have_to_correct:
to_upload = filter( \
lambda field: not (bibrecord.field_get_subfield_values(field, "3") and \
bibrecord.field_get_subfield_values(field, "3") \
[0].strip() == \
"ADDITIONAL HEPDATA") ,
bibrecord.record_get_field_instances(rec2, "856", ind1="4",
ind2 = " ")) + \
correct_links
bibrecord.record_add_fields(outrec, "856", to_upload)
# HEPDATA comment
fs = bibrecord.record_get_field_instances(rec2, "520",
ind1 = " ", ind2 = " ")
existing_comment = ""
correct_comment = self.comment.strip()
new_fields = []
if fs:
for f in fs:
sfs = bibrecord.field_get_subfield_values(f, "9")
if sfs and sfs[0].strip() == "HEPDATA":
# we have found THE CAPTION
sfs = bibrecord.field_get_subfield_values(f, "h")
if sfs:
existing_comment = sfs[0].strip()
else:
new_fields.append(f)
if existing_comment != correct_comment:
bibrecord.record_add_fields(outrec, "520", new_fields)
if correct_comment:
bibrecord.record_add_field(outrec, "520", \
subfields = [("9", "HEPDATA")] \
+ ((correct_comment or []) and \
[("h", correct_comment)]))
if outrec:
            # If anything was added to the output record, copy over the
            # record identifier
ids = bibrecord.record_get_field_values(rec2, "001")
if ids:
bibrecord.record_add_field(outrec, "001", \
controlfield_value = str(ids[0]))
return bibrecord.record_xml_output(outrec)
else:
return None
def generate_additional_datalinks(self):
""" Return a record containing only fields encoding
aditional data links
"""
rec = {}
for adl in self.additional_data_links:
bibrecord.record_add_field(rec, "856", ind1 = "4", ind2 = " ", \
subfields = [ \
("3", "ADDITIONAL HEPDATA"),
("u", adl["href"]),
("y", adl["description"]),])
return rec
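# For a link {"href": ..., "description": ...} generate_additional_datalinks()
# produces a field roughly equivalent to this MARCXML fragment (sketch):
#
#   <datafield tag="856" ind1="4" ind2=" ">
#     <subfield code="3">ADDITIONAL HEPDATA</subfield>
#     <subfield code="u">href value</subfield>
#     <subfield code="y">description value</subfield>
#   </datafield>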
class Dataset(object):
"""Represents a single dataset saved in the document
we represent only
"""
def __init__(self):
self.column_titles = []
self.column_headers = []
self.data_qualifiers = []
self.data = [] # row by row
self.comments = ""
self.name = ""
self.additional_files = []
self.num_columns = 0
self.location = ""
self.position = 0 #position within the data record
self.additional_data_links = []
self.data_plain = ""
self.recid = None
self.x_columns = 0
self.y_columns = 0
self.title = ""
def __repr__(self):
return "Auxiliary information: " + repr(self.data_qualifiers) + \
" Headers: " + repr(self.column_headers) + " Data: " + repr(self.data)
def get_type(self):
"""Determine type based on the location"""
first_char = (len(self.location.strip()) > 0 or "") and \
self.location.strip().lower()[0]
if first_char == "F":
return "FIGURE"
elif first_char == "T":
return "TABLE"
else:
return "DATASET"
def get_marcxml(self, parent_recid = None):
"""Produces a ready to upload MARC XML
If some files have to be attached to a record, they are
written in the Invenio installation's temporary directory and
referenced from the XML code"""
return self.get_diff_marcxml({}, parent_recid)
empty_data_str = cPickle.dumps({})
def get_diff_marcxml(self, rec2, parent_recid, data_str=None, data_plain=None, force_reupload=False):
"""Produces a MARC XML allowing to modify passed dataset record
into the current dataset. Necessary files are created in the
temporary directory.
If there are no changes to be made, None is returned.
@param rec2: The dataset to compare with
@type rec2: BibRecord
@param recid: The identifier of the record prepresenting dataset
@type recid: Integer
@param parent_recid: The record identifier of the main MARC record
@type parent_recid: Integer
@rtype: String
@returns: MARC XML which modifies the passed record into the one
described by current Dataset instance
"""
outrec = {} # the output record
def addf(*args, **args2):
"""Add field to the output record"""
bibrecord.record_add_field(outrec, *args, **args2)
def get_subfield_with_defval(tag, ind1 = " ", ind2 = " ",
sfcode = "a", default = ""):
"""Retrieve the first vale of a subfield or default"""
fs = bibrecord.record_get_field_instances(rec2, tag, ind1, ind2)
if fs:
sfs = bibrecord.field_get_subfield_values(fs[0], sfcode)
if sfs:
return sfs[0].strip()
return default
# processing the title
existing_title = get_subfield_with_defval(tag = "245", sfcode = "a", default="")
if existing_title != self.title:
addf("245", ind1 = " ", ind2 = " ", subfields = \
[("9", "HEPDATA"), ("a", self.title)])
# processing number of x and y columns
existing_x = int(get_subfield_with_defval(tag = "911", sfcode = "x", default=0))
existing_y = int(get_subfield_with_defval(tag = "911", sfcode = "y", default=0))
correct_x = self.x_columns
correct_y = self.y_columns
if correct_x != existing_x or correct_y != existing_y:
addf("911", ind1 = " ", ind2=" ", subfields = \
[("x", str(self.x_columns)),
("y", str(self.y_columns))])
# processing caption
fs = bibrecord.record_get_field_instances(rec2, "520",
ind1 = " ", ind2 = " ")
existing_comment = ""
correct_comment = self.comments.strip()
new_fields = []
if fs:
for f in fs:
sfs = bibrecord.field_get_subfield_values(f, "9")
if sfs and sfs[0].strip() == "HEPDATA":
# we have found THE CAPTION
sfs = bibrecord.field_get_subfield_values(f, "h")
if sfs:
existing_comment = sfs[0].strip()
else:
new_fields.append(f)
if existing_comment != correct_comment:
bibrecord.record_add_fields(outrec, "520", new_fields)
if correct_comment:
addf("520", \
subfields = [("9", "HEPDATA")] \
+ ((correct_comment or []) and \
[("h", correct_comment)]))
# collaboration
existing_collaboration = get_subfield_with_defval(tag = "710",
sfcode = "g")
correct_collaboration = get_record_collaboration(parent_recid).strip()
if correct_collaboration and \
existing_collaboration != correct_collaboration:
addf("710", ind1= " ", ind2 = " ",
subfields = [("g", correct_collaboration)])
# Link to the original record and the location
if parent_recid:
existing_id = get_subfield_with_defval(tag = "786", sfcode = "w")
existing_arXivId = get_subfield_with_defval(tag = "786",
sfcode = "r")
existing_location = get_subfield_with_defval(tag = "786",
sfcode = "h")
correct_location = self.location.strip()
correct_arXivId = get_record_arxivid(parent_recid).strip()
correct_id = str(parent_recid).strip()
existing_position = get_subfield_with_defval(tag = "786",
sfcode = "q")
correct_position = self.position
# import rpdb2; rpdb2.start_embedded_debugger('password', fAllowRemote=True)
if existing_location != correct_location or \
existing_arXivId != correct_arXivId or \
existing_id != correct_id or \
int(existing_position) != int(correct_position):
subfields = [("w", correct_id), ("q", str(correct_position))]
if correct_arXivId:
subfields.append(("r", correct_arXivId))
if correct_location:
subfields.append(("h", correct_location))
addf("786", ind1 = " ", ind2 = " ", subfields = subfields)
else:
write_message("No dataset parent recid!")
# dataset type (determined based on the location)
correct_type = self.get_type().strip()
existing_type = get_subfield_with_defval(tag = "336", sfcode = "t")
# print "Types: %s %s" % (correct_type, existing_type)
if existing_type != correct_type:
addf("336", ind1 = " ", ind2 = " ", subfields=[("t", correct_type)])
#correcting the collection
correct_collection = "DATA"
existing_collection = get_subfield_with_defval(tag = "980",
sfcode = "a")
if correct_collection != existing_collection:
addf("980", ind1 = " ", ind2 = " ",
subfields=[("a", correct_collection)])
# data qualifiers
correct_qualifiers = bibrecord.record_get_field_instances(
self.generate_qualifiers(parent_recid), "653",
ind1 = "1", ind2 = " ")
present_qualifiers = bibrecord.record_get_field_instances(rec2, "653",
ind1 = "1",
ind2 = " ")
        # order does not matter! we will sort them lexicographically
        # before comparing
def qualifier_comparer(q1, q2):
""" compare two qualifier fields """
sfs1 = bibrecord.field_get_subfield_values(q1, "r")
sfs2 = bibrecord.field_get_subfield_values(q2, "r")
if sfs1 and sfs2:
if sfs1[0] > sfs2[0]:
return True
if sfs2[0] > sfs1[0]:
return False
else:
# reaction is always bigger than non-reaction
if sfs1 and not sfs2:
return True
elif sfs2 and not sfs1:
return False
else:
# compare on keys
sfs1 = bibrecord.field_get_subfield_values(q1, "k")
sfs2 = bibrecord.field_get_subfield_values(q2, "k")
if sfs1 and not sfs2:
return True
elif sfs2 and not sfs1:
return False
if sfs1 and sfs2 and sfs1[0] > sfs2[0]:
return True
elif sfs1 and sfs2 and sfs2[0] > sfs1[0]:
return False
else:
sfs1 = bibrecord.field_get_subfield_values(q1, "v")
sfs2 = bibrecord.field_get_subfield_values(q2, "v")
if sfs1 and not sfs2:
return True
elif sfs2 and not sfs1:
return False
elif sfs1 and sfs2:
return sfs1[0] > sfs2[0]
else:
return False
# compare on columns
sfs1 = " ".join(bibrecord.field_get_subfield_values(q1, "c"))
sfs2 = " ".join(bibrecord.field_get_subfield_values(q2, "c"))
return sfs1 > sfs2
correct_qualifiers.sort(cmp = qualifier_comparer)
present_qualifiers.sort(cmp = qualifier_comparer)
fgsv = bibrecord.field_get_subfield_values
qualifiers_eq = lambda x, y: \
fgsv(x, "r") == fgsv(y, "r") and \
fgsv(x, "k") == fgsv(y, "k") and \
fgsv(x, "v") == fgsv(y, "v") and \
set(fgsv(x, "c")) == set(fgsv(y, "c"))
if not reduce(lambda x, y: x and qualifiers_eq(y[0], y[1]), \
zip(correct_qualifiers, present_qualifiers), \
(len(correct_qualifiers) == len(present_qualifiers))):
bibrecord.record_add_fields(outrec, "653", correct_qualifiers)
# columns ( the order does not matter)
present_columns = bibrecord.record_get_field_instances(rec2, "910")
correct_columns = bibrecord.record_get_field_instances(
self.generate_columns(), "910")
column_cmp = lambda x, y: \
int(bibrecord.field_get_subfield_values(x, "n")[0]) > \
int(bibrecord.field_get_subfield_values(y, "n")[0])
fgsv = bibrecord.field_get_subfield_values
columns_eq = lambda x, y: \
fgsv(x, "n") == fgsv(y, "n") and \
fgsv(x, "t") == fgsv(y, "t") and \
fgsv(x, "d") == fgsv(y, "d")
correct_columns.sort(cmp = column_cmp)
present_columns.sort(cmp = column_cmp)
(not reduce(lambda x, y: x and columns_eq(y[0], y[1]), \
zip(correct_columns, present_columns), \
len(correct_columns) == len(present_columns))) and \
bibrecord.record_add_fields(outrec, "910", \
correct_columns)
# data of the table
existing_data = {}
try:
existing_data = cPickle.loads(data_str)
except:
existing_data = []
if (not data_str) or (not self.compare_data(existing_data)) or force_reupload:
            # we retrieve plain data only if the table data is different
self.retrieve_plain_data()
(fname_int, fname_plain) = self.write_data_to_tmpfile()
if fname_int:
bibrecord.record_add_field(outrec, "FFT", subfields = [ \
("a", fname_int), \
("t", "Data"), \
("n", "Data"), \
("f", ".data"), \
("o", "HIDDEN"), \
("d", "data extracted from the table") \
])
if fname_plain:
bibrecord.record_add_field(outrec, "FFT", subfields = [ \
("a", fname_plain), \
("t", "Data"), \
("n", "Data"), \
("f", ".txt"), \
("d", "data extracted from the table") \
])
if outrec:
ids = bibrecord.record_get_field_values(rec2, "001")
if ids:
addf("001", controlfield_value = str(ids[0]))
return bibrecord.record_xml_output(outrec)
return None
def retrieve_plain_data(self):
data_url = urllib2.urlparse.urljoin(CFG_HEPDATA_URL,
reduce( \
lambda x, y: x or (y[1] == "plain text" and y[0]) ,
self.additional_files, ""))
try:
self.data_plain = download_with_retry(data_url)
except Exception, e:
print "Impossible to retrieve the plain text format related to a dataset. URL: %s "% (data_url, )
self.data_plain = ""
return self.data_plain
def generate_columns(self):
"""
Generates an Invenio record containing only fields that describe
columns present in the dataset
"""
# Application of map/reduce to Invenio ;)
import operator
return reduce(lambda rec, sf: \
(bibrecord.record_add_field(rec, "910", subfields=sf)\
and rec),
map(lambda num, title, header: \
reduce(
operator.add, [[("n", num)],
(title or []) and [("t", title or "")],
(header or []) and \
[("d", header or "")]], []), \
map(str, range(self.num_columns)), \
reduce(operator.add,
[[col_t["content"]] * col_t["colspan"] \
for col_t in self.column_titles], []), \
reduce(operator.add,
[[col_h["content"]] * col_h["colspan"] \
for col_h in self.column_headers], [])),
{}) # start with {} as initial record
def generate_qualifiers(self, master_recid):
"""Generate fields describing data qualifiers of a current dataset
Returns a record containing only fields with necessary qualifiers
"""
rec = {} # we will start adding to an empty record
for dq_line in self.data_qualifiers:
current_column = 0
for dq in dq_line:
col_pos = dq["content"].find(":")
subfields = []
if col_pos == -1:
log_msg = ("""Data qualifier "%(dq)s" does not contain""" +\
""" colon. Record number: %(recid)s """) % {
"dq" : dq["content"],
"recid" : str(master_recid)
}
hepdata_log("harvesting", log_msg)
dq_key = ""
dq_value = dq["content"].strip()
else:
dq_key = dq["content"][:col_pos].strip()
dq_value = dq["content"][col_pos + 1:].strip()
if dq_key == "RE": # the reaction data
subfields.append(("r", dq_value))
else:
subfields.append(("k", dq_key))
subfields.append(("v", dq_value))
                # now processing the columns this qualifier applies to
subfields += [("c", str(col_num)) for col_num in \
xrange(current_column,
current_column + dq["colspan"])]
current_column += dq["colspan"]
bibrecord.record_add_field(rec, "653", ind1 = "1",
ind2 = " ", subfields = subfields)
return rec
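    # Example of the mapping performed by generate_qualifiers() (hypothetical values):
    # a cell "RE : P P --> JET X" spanning the first two columns yields a 653 1_
    # field with subfields $r "P P --> JET X", $c "0", $c "1"; a cell
    # "SQRT(S) : 7000 GeV" yields $k "SQRT(S)", $v "7000 GeV" plus its $c columns.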
@staticmethod
def create_from_record(rec, data_str, parent_recid, data_plain):
"""Creates an instance from a record"""
ds = Dataset()
ds.data_plain = data_plain
ds.title = ""
fs = bibrecord.record_get_field_instances(rec, "245", " ", " ")
if fs:
sfs = bibrecord.field_get_subfield_values(fs[0], "a")
if sfs:
ds.title = sfs[0].strip()
# filling recid
ds.recid = bibrecord.record_get_field_value(rec, "001")
# comments:
fs = filter(lambda field: bibrecord.field_get_subfield_values(field, "9") and \
bibrecord.field_get_subfield_values(field, "9")[0] == \
"HEPDATA", \
bibrecord.record_get_field_instances(rec, "520", \
ind1 = " ", \
ind2 = " "))
if fs:
sfs = bibrecord.field_get_subfield_values(fs[0], "h")
if sfs:
ds.comments = sfs[0]
# reading the position
fs = filter(lambda field: \
bibrecord.field_get_subfield_values(field, "w") and \
int(bibrecord.field_get_subfield_values(field, "w")[0]) \
== parent_recid,
bibrecord.record_get_field_instances(rec, "786"))
if fs:
sfs = bibrecord.field_get_subfield_values(fs[0], "q")
if sfs:
ds.position = int(sfs[0])
# reading numbers of x and y columns
fs = bibrecord.record_get_field_instances(rec, "911")
ds.x_columns = 0
ds.y_columns = 0
if fs:
ds.x_columns = int(bibrecord.field_get_subfield_values(fs[0], "x")[0])
ds.y_columns = int(bibrecord.field_get_subfield_values(fs[0], "y")[0])
ds.num_columns = ds.x_columns + ds.y_columns
#reading columns - they are necessary for reading data qualifiers
fs = bibrecord.record_get_field_instances(rec, "910")
columns = []
for f in fs:
column = {"pos": -1, "header": "", "title":""}
sfs = bibrecord.field_get_subfield_values(f, "n")
if sfs:
column["pos"] = sfs[0]
sfs = bibrecord.field_get_subfield_values(f, "t")
if sfs:
column["title"] = sfs[0]
sfs = bibrecord.field_get_subfield_values(f, "d")
if sfs:
column["header"] = sfs[0]
columns.append(column)
columns.sort(cmp = lambda x, y: x["pos"] > y["pos"])
ds.column_headers = []
ds.column_titles = []
cur_header = None
prev_header = None # previous header string
cur_title = None
prev_title = None # previous title string
for col in columns:
if col["title"] == prev_title:
cur_title["colspan"] += 1
else:
if cur_title:
ds.column_titles.append(cur_title)
cur_title = {"content" : col["title"], "colspan" : 1}
prev_title = col["title"]
if col["header"] == prev_header:
cur_header["colspan"] += 1
else:
if cur_header:
ds.column_headers.append(cur_header)
cur_header = {"content" : col["header"], "colspan" : 1}
prev_header = col["header"]
if cur_title:
ds.column_titles.append(cur_title)
if cur_header:
ds.column_headers.append(cur_header)
#reading data qualifiers -> we have to pack them into table !
qualifiers = [("", [])] # an array with all possible qualifiers
# first reading qualifiers
# reading qualifiers requires assigning them places in the readable
# table here we try to compactify qualifiers by leaving as few space
# in the table as possible
fs = bibrecord.record_get_field_instances(rec, "653", ind1="1")
for f in fs:
# first decoding the qualifier
cur_qual = ""
sfs = bibrecord.field_get_subfield_values(f, "r")
if sfs: # this is a reaction
cur_qual = "RE : %s" % (sfs[0],)
sfs = bibrecord.field_get_subfield_values(f, "k")
sfs2 = bibrecord.field_get_subfield_values(f, "v")
if sfs and sfs2: # this is a regular key-value data qualifeir
cur_qual = "%s : %s" % (sfs[0], sfs2[0])
# read columns
columns = []
sfs = bibrecord.field_get_subfield_values(f, "c")
for sf in sfs:
if int(sf) >= ds.num_columns:
hepdata_log("reconstruction", "Data qualifiers occuly more columns that exist in a dataset. Qualifier %s in column %s ... ignoring exceed. rec: %s" % (cur_qual, str(sf), str(rec), ))
else:
columns.append(int(sf))
columns.sort()
qualifiers.append((cur_qual, columns))
qualifiers.sort(cmp = lambda x, y: len(y[1]) - len(x[1]))
qualifier_rows = [] # we start with an empty assignment
for (q_pos, qualifier) in \
zip(xrange(len(qualifiers) - 1), qualifiers[1:]):
# searching for a row that can be used for this qualifier
            blocker = True # True while the candidate row is unusable (one of the qualifier's columns is already taken); start True so row 0 gets examined
elected_row = 0 # 0th row preelected
while blocker and elected_row < len(qualifier_rows):
blocker = False
for col in qualifier[1]:
blocker = blocker or (qualifier_rows[elected_row][col] != 0)
if blocker:
elected_row += 1
if blocker:
# adding new line to the list (if necessary):
qualifier_rows.append([0] * ds.num_columns)
# assigning the qualifier to the elected line
for col in qualifier[1]:
qualifier_rows[elected_row][col] = q_pos + 1
# real position is shifted by 1
# now translating into the regular qualifiers array
ds.data_qualifiers = []
for row in qualifier_rows:
cur_row = []
ds.data_qualifiers.append(cur_row)
prev_data = None
cur_width = 0
for cell in row:
if prev_data == cell:
cur_width += 1
else:
if cur_width > 0:
cur_row.append({"content": qualifiers[prev_data][0],
"colspan" : cur_width})
cur_width = 1
prev_data = cell
# append the remaining one
if cur_width > 0:
cur_row.append({"content": qualifiers[prev_data][0],
"colspan" : cur_width})
# Checking if the data content is up to date (or exists at all) and upload
# reading the data -> from a stream provided as an argument
# (stored as an attached record in the database)
try:
ds.data = cPickle.loads(data_str)
except:
ds.data = []
return ds
def compare_data(self, ds):
"""Compare current data with the dataset passed as an argument
@parameter dataset to compare with (the same as the content of Dataset.data)
@type ds List
@return True if data in both datasets are equal, otherwise False
@returntype boolean"""
try:
return reduce(lambda prev, datalines: prev and reduce( \
lambda prev, datas: prev and \
datas[0]["colspan"] == datas[1]["colspan"] and \
datas[0]["content"] == datas[1]["content"], \
zip(datalines[0], datalines[1]), \
len(datalines[0]) == len(datalines[1])), \
zip(ds, self.data), \
len(ds) == len(self.data))
except Exception, e:
            return False # a malformed or mismatching structure counts as different data
def write_data_to_tmpfile(self):
"""Writes data from the dataset into a temporary file and returns
the file name. This file can be attached into the record
@return Names of the files where data has been written (internal_data, plain_data)
@returntype (string, string)"""
import tempfile
if cPickle.dumps(self.data):
fdesc, fname = tempfile.mkstemp(suffix = ".data", prefix = "data_", \
dir = CFG_TMPSHAREDDIR)
os.write(fdesc, cPickle.dumps(self.data))
os.close(fdesc)
else:
fname = None
if self.data_plain:
fdesc, fname2 = tempfile.mkstemp(suffix = ".txt", prefix = "data_", \
dir = CFG_TMPSHAREDDIR)
os.write(fdesc, self.data_plain)
os.close(fdesc)
else:
fname2 = None
return (fname, fname2)
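# The two temporary files returned by write_data_to_tmpfile() are referenced from
# the FFT fields built in get_diff_marcxml(), so that bibupload attaches them to the
# dataset record: the pickled table as a hidden ".data" file and the plain-text
# rendering as a ".txt" file.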
class DatasetParser(object):
def __init__(self, owner, dataset):
self.owner = owner
self.dataset = dataset
self.parsingComments = False
self.parsingLocation = True # first comes location, than after <br> comes comment
self.parsingOtherTag = 0
def handle_starttag(self, tag, attrs):
if self.parsingOtherTag > 0:
self.parsingOtherTag += 1
else:
if tag == "br":
self.parsingLocation = False
self.parsingComments = True
elif tag == "a":
# search for those links which have href but it does not
# end with one of marked suffixes
for at in attrs:
if at[0] == "href":
link = strip_link_session_id(at[1])
for suf in ACCEPTED_FORMATS.keys():
if link.endswith(ACCEPTED_FORMATS[suf]):
self.dataset.additional_files.append([link, suf])
self.parsingOtherTag = 1
else:
self.parsingOtherTag = 1
def handle_endtag(self, tag):
if self.parsingOtherTag > 0:
self.parsingOtherTag -= 1
if tag == "div":
self.owner.exit_special_mode()
self.parsingComments = False
def handle_charref(self, name):
if self.parsingOtherTag > 0:
return
refstring = "&#" + name + ";"
if self.parsingComments:
self.dataset.comments += refstring
elif self.parsingLocation:
self.dataset.location += refstring
def handle_entityref(self, name):
if self.parsingOtherTag > 0:
return
if name == "nbsp":
return
refstring = "&" + name + ";"
if self.parsingComments:
self.dataset.comments += refstring
elif self.parsingLocation:
self.dataset.location += refstring
def handle_data(self, data):
if self.parsingOtherTag > 0:
return
if self.parsingComments:
self.dataset.comments += data
elif self.parsingLocation:
self.dataset.location += data
def exit_special_mode(self):
pass
# Parsing : this is a very dangerous method of parsing the HTML page ... will fail and possibly corrupt data
# whenever the maintainer of HEPData decides to modify the format of pages
class DataBoxParser(object):
""" a special parser for data tables """
def __init__(self, owner, dataset):
"""
@param owner - The object owning the current one - a global parser
"""
self.dataset = dataset
self.state = "columntitles"
self.owner = owner
self.current_line = []
self.current_cell = None
def handle_starttag(self, tag, attrs):
if tag == "tr":
self.current_line = []
if ("class", "xyheaders") in attrs:
self.state = "headers"
elif self.state == "headers":
self.state = "predata" # things before headers and data ...
elif self.state == "predata":
self.state = "data"
elif ("class", "altformats") in attrs:
self.state = "footer"
if tag in ("th", "td"):
if self.state == "footer":
self.dataset.x_columns += 1
colspan = 1
for attr in attrs:
if attr[0] == "colspan":
colspan = int(attr[1])
axis = ""
if ("class", "xval") in attrs:
axis = "x"
if ("class", "yval") in attrs:
axis = "y"
self.current_cell = {"colspan": colspan, "content": "", "axis": axis}
if tag in ("a"):
if self.state == "footer":
if ("title", "Display this table in graphical form") in attrs:
self.dataset.y_columns += 1
self.dataset.x_columns -= 1
def handle_charref(self, name):
if self.current_cell:
self.current_cell["content"] += "&#" + name + ";"
def handle_entityref(self, name):
if name == "nbsp":
return
if self.current_cell:
self.current_cell["content"] += "&" + name + ";"
def handle_data(self, data):
if self.current_cell:
self.current_cell["content"] += data
def handle_endtag(self, tag):
if tag == "table":
#exiting the data-reading mode
self.owner.exit_special_mode()
if tag == "tr":
to_add = None
if self.state == "auxiliary":
to_add = self.dataset.data_qualifiers
elif self.state == "headers":
self.dataset.column_headers = self.current_line
elif self.state == "data":
to_add = self.dataset.data
elif self.state == "columntitles":
self.state = "auxiliary"
self.dataset.column_titles = self.current_line
if not to_add is None:
to_add.append(self.current_line)
self.current_line = []
if tag in ("td", "th"):
self.current_cell["content"] = self.current_cell["content"].strip()
self.current_line.append(self.current_cell)
class AdditionalDataParser(object):
def __init__(self, owner, paper):
self.owner = owner
self.paper = paper
self.paper.additional_data_links = []
self.current_link = None
def handle_starttag(self, tag, attrs):
        # we assume there are no nested divs inside this
if tag == "a":
self.current_link = {"description": ""}
for attr in attrs:
self.current_link[attr[0]] = attr[1]
self.paper.additional_data_links.append(self.current_link)
def handle_endtag(self, tag):
if tag == "div":
self.owner.exit_special_mode()
elif tag == "a":
self.current_link = None
def handle_charref(self, name):
if self.current_link:
self.current_link["description"] += "&#" + name + ";"
def handle_entityref(self, name):
if name == "nbsp":
return
if self.current_link:
self.current_link["description"] += "&" + name + ";"
def handle_data(self, data):
if self.current_link:
self.current_link["description"] += data
class SystematicsParser(object):
# Systematics we will remember as a table
def __init__(self, owner, paper):
self.owner = owner
self.paper = paper
def handle_starttag(self, tag, attrs):
        # we assume there are no nested divs inside this
self.paper.systematics += "<" + tag + " " + \
(" ".join([ s[0] + "=\"" + s[1] + "\"" for s in attrs])) + ">"
def handle_endtag(self, tag):
if tag == "div":
self.owner.exit_special_mode()
else:
self.paper.systematics += "</" + tag + ">"
def handle_charref(self, name):
self.paper.systematics += "&#" + name + ";"
def handle_entityref(self, name):
if name == "nbsp":
return
self.paper.systematics += "&" + name + ";"
def handle_data(self, data):
self.paper.systematics += data
class HEPParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.special_mode = None
self.paper = Paper()
self.parsing_paper_comment = False
def exit_special_mode(self):
self.special_mode = None
def parse_paperbox(self):
"""started parsing the paper box"""
pass
def parse_datasetbox(self):
dataset = Dataset()
self.paper.datasets += [dataset]
self.special_mode = DatasetParser(self, dataset)
def parse_dataset(self):
"""parse the data table"""
dataset = self.paper.datasets[-1]
self.special_mode = DataBoxParser(self, dataset)
def parse_systematics(self):
self.special_mode = SystematicsParser(self, self.paper)
def parse_paper_comment(self):
self.parsing_paper_comment = True
def parse_additional_data(self):
self.special_mode = AdditionalDataParser(self, self.paper)
def handle_data(self, data):
if self.special_mode != None:
self.special_mode.handle_data(data)
elif self.parsing_paper_comment:
self.paper.comment += data
def handle_charref(self, name):
refstring = "&#" + name + ";"
if self.special_mode != None:
self.special_mode.handle_charref(name)
elif self.parsing_paper_comment:
self.paper.comment += refstring
def handle_entityref(self, name):
if name == "nbsp":
return
refstring = "&" + name + ";"
if self.special_mode != None:
self.special_mode.handle_entityref(name)
elif self.parsing_paper_comment:
self.paper.comment += refstring
def handle_starttag(self, tag, attrs):
if self.special_mode != None:
self.special_mode.handle_starttag(tag, attrs)
elif tag == "div":
if ("class", "paperbox") in attrs:
self.parse_paperbox()
if ("class", "datasetbox") in attrs:
self.parse_datasetbox()
if ("class", "systematics") in attrs:
self.parse_systematics()
if ("class", "extradata") in attrs:
self.parse_additional_data()
elif tag == "table" and ("class", "dataset") in attrs:
# we have to add real data to previous dataset
self.parse_dataset()
elif tag == "p" and ("class", "papercomment") in attrs:
self.parse_paper_comment()
# elif tag == "br" and self.parsing_paper_comment:
# self.paper.comment += "<br>"
elif tag == "a":
# search for those links which have href but it does not
# end with one of marked suffixes
for at in attrs:
if at[0] == "href":
link = strip_link_session_id(at[1])
for suf in ACCEPTED_FORMATS.keys():
if link.endswith(ACCEPTED_FORMATS[suf]):
self.paper.additional_files.append([link, suf])
def handle_endtag(self, tag):
if self.special_mode != None:
self.special_mode.handle_endtag(tag)
if tag == "p" and self.parsing_paper_comment:
self.parsing_paper_comment = False
def strip_link_session_id(st):
return st.split(";jsessionid")[0]
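# Example (sketch): strip_link_session_id("/download/1234/plain.txt;jsessionid=ABC")
# returns "/download/1234/plain.txt".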
def wash_code(content):
"""Correcting the HEPData XHTML code so that it can be parsed\
@return correct code - string
"""
#filtering out cases of having incorrect closing tags containing attributes
res = re.split("</([a-zA-Z0-9]+)\s[^>]*>", content)
for pos in range(1, len(res), 2):
res[pos] = "</" + res[pos] + ">"
content = "".join(res)
# in the systematics section there are errors with enclosing colspans in
# quotes
res = re.split("colspan=([0-9]+)\'", content)
for pos in range(1, len(res), 2):
res[pos] = "colspan='" + res[pos] + "'"
content = "".join(res)
return content
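# Examples of the two repairs performed by wash_code() (sketches):
#   "</td class='x'>"  ->  "</td>"            (attributes dropped from closing tags)
#   "<td colspan=3'>"  ->  "<td colspan='3'>" (missing opening quote restored)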
def download_paper(page_url, recid):
try:
content = wash_code(download_with_retry(page_url))
except Exception, e:
write_message("Error when retrieving dataset. URL: %s" %(page_url, ))
raise e
parser = HEPParser()
parser.feed(content)
paper = parser.paper
# fixing column lengths and titles
import operator
get_line_len = lambda line: reduce(operator.add,
map(lambda hd: hd["colspan"], line), 0)
for ds in paper.datasets:
ds.num_columns = reduce(max, map(get_line_len, ds.data) + \
[get_line_len(ds.column_headers),
get_line_len(ds.column_titles), ds.x_columns + ds.y_columns])
paper_title = get_record_val(recid, "245", sfcode = "a")
if not paper_title:
paper_title = "record %s" % (str(recid), )
res = re.search("F\\s*([0-9]+)", ds.location)
if res:
ds.title = "Data from figure %s from: %s" % (res.groups()[0], paper_title)
else:
ds.title = "Additional data from: %s" % (paper_title, )
# write_message("Setting the title")
# download necessary datasets and fix other things
cur_pos = 1
for ds in paper.datasets:
lo = ds.location.find("\n\n")
ds.location = ds.location[:lo].strip()
if ds.location and ds.location[0] == "(":
ds.location = ds.location[1:]
if ds.location and ds.location[-1] == ")":
ds.location = ds.location[:-1]
ds.location = ds.location.strip()
ds.position = cur_pos
cur_pos += 1
return paper
def retrieve_hepdata(page_url, recid):
"""retrieves a dataset either from cache or downloads and fills the cache"""
    # we download directly... no cache this time
data = download_paper(page_url, recid)
return data
def get_hepdata_allids_url():
""" Return the URL of a site giving all identifiers
"""
return "%s/AllIds" % (CFG_HEPDATA_URL, )
def get_hepdata_url_from_recid(recid):
""" Returns a HEPData URL for a given recid
"""
return "%s/View/ins%s/all" % (CFG_HEPDATA_URL, str(recid))
def retrieve_data_for_record(recID):
"""Retrieves the Paper object representing data associated with a publication"""
rec = search_engine.get_record(recID)
paper = Paper.create_from_record(rec)
try:
paper.datasets = map(lambda x: x[1], get_attached_hepdata_datasets(recID))
except:
paper.datasets = None
if not paper.datasets:
return None
else:
return paper
def get_hepdata_by_recid_raw(recid):
"""Retrieves raw data corresponding to a HEPData record.
@param recid: Identifier of the record representing a dataset
@type recid: Integer
@returns: a tuple consisting of a record (bibrecord representation) and string of data
@rtype: (Record, String, String)
"""
rec = search_engine.get_record(recid)
# retrieving the data string (content of an attachment)
data_str = cPickle.dumps([])
data_plain = ""
from invenio import bibdocfile
brd = bibdocfile.BibRecDocs(recid)
if brd.has_docname_p("Data"):
bd = brd.get_bibdoc("Data")
try:
data_file = bd.get_file(".data")
if data_file:
data_str = data_file.get_content()
except:
#TODO: The document exists but does not have one of required formats ... we might want to record this in some type of log or even notify someone behind the scenes ?
pass
try:
data_file = bd.get_file(".txt")
if data_file:
data_plain = data_file.get_content()
except:
#TODO: The document exists but does not have one of required formats ... we might want to record this in some type of log or even notify someone behind the scenes ?
pass
return (rec, data_str, data_plain)
def get_hepdata_by_recid(parent_recid, recid):
"""Retrieve a dataset encoded in a given record
@param parent_recid: record identifier of the publication attaching the dataset
@type parent_recid: Integer
@param recid: Identifier of te record identifying the dataset
@type recid: Integer
@rtype: Dataset
@returns: A dataset represented by a record of a given number
"""
rec, data_str, data_plain = get_hepdata_by_recid_raw(recid)
return Dataset.create_from_record(rec, data_str, parent_recid, data_plain)
def get_attached_hepdata_records(recid):
"""Retrieves raw data of a HEPData for a given recid
    We perform an additional, in principle redundant (given a correct configuration),
    step to filter out records that have since been deleted
@param recid: The record id of a publication to which datasets refer
@type recid: Integer
@return: List of tuples (recid, record, data_string, data_plain)
@rtype: List of tuples"""
ids = get_attached_hepdata_dataset_ids(recid)
def rec_not_deleted(tup):
rec = tup[1]
if not "980" in rec:
return True
f_980 = rec["980"]
return reduce(lambda bool_res, subfield: bool_res and (not ('c', 'DELETED') in subfield[0]), f_980, True)
return filter(rec_not_deleted , map(lambda element: (element[0], element[1][0], element[1][1], element[1][2]), \
zip(ids, map(get_hepdata_by_recid_raw, ids))))
def get_attached_hepdata_dataset_ids(recid):
"""Returns all identifeirs of datasets attached to a given publication
@param recid: The identifeir of record to which datasets are attached
@type recid: Integer
@rtype: intbitset
@returns: intbitset of all the record identifeirs
"""
return search_engine.search_pattern(p="%s:%s" % (CFG_HEPDATA_FIELD, str(recid),))
def get_attached_hepdata_datasets(recid):
"""For a given recid, retrieves recids of datasets that are related
to a publication
    @param recid: The identifier of the record to which datasets are attached
    @type recid: Integer
    @rtype: List of tuples
    @returns: List of tuples (recid, Dataset instance) where recid is the
    identifier of the record representing the given dataset
"""
# Search for all the records refering to a given one
recids = get_attached_hepdata_dataset_ids(recid)
return zip(recids, map(
lambda dsrecid: get_hepdata_by_recid(recid, dsrecid), recids))
# Universal log
def hepdata_log(category, msg):
"""Log an important event that should be processed by the administrator
manually"""
log_path = os.path.join(CFG_LOGDIR, "hepdata.log")
f = open(log_path, "a")
f.write("%s %s: %s\n" % (str(datetime.now()), category, msg))
f.close()
# The harvesting daemon
def hepdata_get_all_identifiers():
page_content = download_with_retry(get_hepdata_allids_url())
matches = re.search("<pre>([^<]*)</pre>", page_content)
json_string = matches.groups()[0].replace(",,", ",0,")
    return json.loads(json_string)[:-1] # We omit the last 0,0,0 entry
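# The AllIds page embeds a JSON array inside a <pre> element; empty cells are
# patched to 0 before parsing and the trailing 0,0,0 sentinel entry is dropped.
# Only the first element of each entry (the Inspire record id) is used by the
# harvester, see hepdata_harvest_get_identifiers() below.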
def hepdata_harvest_get_identifiers():
"""
Retrieves identifiers of records that should be processed searching for
corresponding HEPData entry
"""
if task_has_option('record_to_harvest'):
yield task_get_option('record_to_harvest')
else:
used_ids = set() # sometimes records are reported many times
for res in hepdata_get_all_identifiers():
if res[0] and not res[0] in used_ids:
used_ids.add(res[0])
yield res[0]
def prepare_hepdata_for_upload(recid, hepdata, insert_stream, correct_stream,
task_stats, force_reupload=False):
"""Retrieve a single entry from HEPData and create MARC XML files to
upload to Inspire
Uploaded files are:
- patch to the original MARC record (assigning the link if it is
inconsistent with the current one)
- marc files for new records
    @param recid: Number of the record inside the current Invenio
    installation
    @type recid: Integer
@param hepdata: Paper object representing current state of HEPData
(downloaded from the website)
@type hepdata: Paper
    @param insert_stream: Queue.Queue of string representations of records that will
be passed to bibupload in the insert mode
@type insert_stream: Queue.Queue of strings
    @param correct_stream: Queue.Queue of string representations of records that
will be passed to bibupload in the correct mode
@type correct_stream: Queue.Queue of strings
"""
# 1) check the inspire number that is related to the
# How to detect if there is already an entry for HEPData try to upload
# the description
# Retrieve dataset records attached to the record.
dataset_records = get_attached_hepdata_records(recid)
get_record_pos = lambda entry: Dataset.create_from_record(entry[1], entry[2], None, None).position
dataset_records.sort(cmp = lambda x, y: cmp(get_record_pos(x),get_record_pos(y)))
#Applying changes to subsequent datasets !
# (The position is what matters in terms of uniqueness)
hepdata_datasets = hepdata.datasets
# 1) making lists have the same length
len_diff = len(dataset_records) - len(hepdata_datasets)
if len_diff > 0:
hepdata_datasets += [None] * len_diff
else:
dataset_records += [None] * (-len_diff)
import tempfile
# fdesc, fname = tempfile.mkstemp()
# os.write(fdesc, cPickle.dumps([dataset_records, hepdata_datasets]))
# os.close(fdesc)
# print "Retrieved datasets : %s" % (fname, )
num_deleted = 0
num_added = 0
num_modified = 0
for (inv_dataset, hep_dataset) in zip(dataset_records, hepdata_datasets):
if inv_dataset is None:
# create completely new record
insert_stream.put_nowait(hep_dataset.get_marcxml(recid))
if task_stats["semaphore"]:
task_stats["semaphore"].acquire()
task_stats["inserted_hepdata_datasets"] += 1
if task_stats["semaphore"]:
task_stats["semaphore"].release()
num_added += 1
elif hep_dataset is None:
# delete invenio record corresponding to a data set
if task_stats["semaphore"]:
task_stats["semaphore"].acquire()
task_stats["deleted_hepdata_datasets"] += 1
if task_stats["semaphore"]:
task_stats["semaphore"].release()
rec = {}
bibrecord.record_add_field(rec, "980", subfields = \
[("c", "DELETED")])
bibrecord.record_add_field(rec, "001", controlfield_value = \
str(inv_dataset[0]))
correct_stream.put_nowait(bibrecord.record_xml_output(rec))
num_deleted += 1
else:
diff_xml = hep_dataset.get_diff_marcxml(inv_dataset[1], recid, inv_dataset[2], inv_dataset[3], force_reupload=force_reupload)
if diff_xml:
if task_stats["semaphore"]:
task_stats["semaphore"].acquire()
task_stats["corrected_hepdata_datasets"] += 1
if task_stats["semaphore"]:
task_stats["semaphore"].release()
correct_stream.put_nowait(diff_xml)
num_modified += 1
# assure that the original MARC record is correct
rec = search_engine.get_record(recid)
if rec:
diff_marcxml = hepdata.get_diff_marcxml(rec)
if diff_marcxml:
correct_stream.put_nowait(diff_marcxml)
# task_stats["new_hepdata_records"] += 1
return num_added, num_deleted, num_modified
def get_data_line_length(data_line):
"""return a real width in columns of a data line"""
d_len = 0
for d in data_line:
d_len += d["colspan"]
return d_len
def calculate_columns_number(dataset):
"""Retrieve the real number of columns - maximum over data columns,
header columns and titles"""
max_len = 0
for data_l in dataset.data:
if get_data_line_length(data_l) > max_len:
max_len = get_data_line_length(data_l)
for data_l in dataset.data_qualifiers:
if get_data_line_length(data_l) > max_len:
max_len = get_data_line_length(data_l)
if get_data_line_length(dataset.column_headers) > max_len:
max_len = get_data_line_length(dataset.column_headers)
if get_data_line_length(dataset.column_titles) > max_len:
max_len = get_data_line_length(dataset.column_titles)
return max_len
def hepdata_harvest_task_submit_elaborate_specific_parameter(key, value, opts, args):
""" Given the string key it checks it's meaning, eventually using the
value. Usually it fills some key in the options dict.
It must return True if it has elaborated the key, False, if it doesn't
know that key.
eg:
if key in ['-n', '--number']:
task_get_option(\1) = value
return True
return False
"""
if key in ("--recid", "-r"):
task_set_option('record_to_harvest', value)
elif key in ("--nthreads", "-n"):
task_set_option('threads_number', value)
elif key in ("--force-reupload", "-f"):
task_set_option('force_reupload', True)
else:
return False
return True
def hepdata_harvest_main():
"""The main function of the HEPData harvesting daemon executed via BibSched.
This daemon harvests the complete HEPData set and uploads modifications
to Inspire.
"""
task_init(authorization_action = 'runhepdataharvest',
authorization_msg = "HEPDataHarvest Task Submission",
description = """Retrieve HEPData and attach them to correcponding
Invenio records.
Examples:
$ hepdataharvest -r 12
""",
help_specific_usage = \
""" -r, --recid The identifier of the record that should be reharvested
from HEPData
-n, --nthreads Number of concurrent harvesting threads. This number is
equal to the number of HTTP requests performed at the same
time
-f, --force-reupload Forces the harvester to reupload all data files
""",
version=__revision__,
specific_params=("r:n:f",
[ "recid=", "nthreads=", "force-reupload" ]),
task_submit_elaborate_specific_parameter_fnc =
hepdata_harvest_task_submit_elaborate_specific_parameter,
task_run_fnc = hepdata_harvest_task_core)
def write_xml_stream_to_tmpfile(stream, prefix):
"""
Stream: list of strings
writes a list of strings into a temporary MARCXML file.
The collection header and footer together with the XML
structure are added
@return Name of the temporary file
"""
if not stream:
# We do not want to write in the case of empty input
return None
import tempfile
fdesc, fname = tempfile.mkstemp(suffix = ".xml", prefix = prefix, \
dir = CFG_TMPSHAREDDIR)
os.write(fdesc, """<?xml version="1.0" encoding="UTF-8"?>
<collection xmlns="http://www.loc.gov/MARC21/slim">""")
for part in stream:
os.write(fdesc, part)
os.write(fdesc, "</collection>")
os.close(fdesc)
return fname
def update_single_status(recid, processed_recs, total_recs):
"""Update the BibSched task status"""
from math import floor
progress = floor(float(processed_recs * 1000) / total_recs)/10
task_update_progress("Harvested %i records out of %i ( %s%% ) " % (processed_recs, total_recs, str(progress)))
def process_single_thread(input_recids, insert_queue, correct_queue, failed_ids, task_stats, suspend_wait_queue, suspend_resume_queue, main_syn_queue, num_tasks, finished_queue = None, total_recs=0, force_reupload = False):
finished = False
processed_recs = 0
while not finished:
try:
recid = input_recids.get_nowait()
except:
finished = True
if not finished:
try:
hepdata = retrieve_hepdata(get_hepdata_url_from_recid(recid), recid)
try:
if not recid:
write_message("Problem! No recid present: %s" % (str(input_recids.queue)))
num_added, num_deleted, num_modified = prepare_hepdata_for_upload(
recid, hepdata, insert_queue, correct_queue,
task_stats, force_reupload = force_reupload)
write_message("Retrieved data for record %s: %i record added, %i records deleted, %i records modified" % (str(recid), num_added, num_deleted, num_modified ))
except Exception, e:
write_message("Error: merging HepData for record %s failed: %s" \
% (str(recid), str(e)))
failed_ids.put_nowait((str(recid), "Failed during the merging phase: %s" % (str(e), )))
except Exception, e:
write_message("Error: retrieving HEPData for record %s failed: %s" \
% (str(recid), str(e)))
failed_ids.put_nowait((str(recid), "Failed during the retrieval phase: %s" % (str(e), )))
if finished_queue:
finished_queue.put_nowait(str(recid))
else:
processed_recs +=1
update_single_status(str(recid), processed_recs, total_recs)
#Possibly trying to stop
task_status = task_read_status()
if task_status.startswith("ABOUT TO"):
if num_tasks == 1:
task_sleep_now_if_required(True)
else:
suspend_wait_queue.get()
write_message("Thread suspended")
if suspend_wait_queue.empty():
main_syn_queue.put("SLEEP")
suspend_resume_queue.get()
suspend_wait_queue.put(1)
write_message("Thread resumed")
elif task_status == "KILLED":
if num_tasks > 1:
main_syn_queue.put("KILLED")
else:
exit(0)
finished = True
    if num_tasks > 1: # signal that this thread has finished its execution
main_syn_queue.put("FINISH")
class RetrievalWorker(threading.Thread):
def __init__(self, recids_queue, insert_queue, correct_queue, finished_queue, failed_ids, task_stats, suspend_wait_queue, suspend_resume_queue, main_syn_queue, num_tasks, force_reupload=False):
threading.Thread.__init__(self)
self.input_recids = recids_queue
self.insert_queue = insert_queue
self.correct_queue = correct_queue
self.finished_queue = finished_queue
self.failed_ids = failed_ids
self.task_stats = task_stats
self.suspend_wait_queue = suspend_wait_queue
self.suspend_resume_queue = suspend_resume_queue
self.num_tasks = num_tasks
self.main_syn_queue = main_syn_queue
self.daemon = True
self.force_reupload = force_reupload
def run(self):
process_single_thread(self.input_recids, self.insert_queue, self.correct_queue,\
self.failed_ids, self.task_stats, self.suspend_wait_queue, \
self.suspend_resume_queue, self.main_syn_queue, self.num_tasks, self.finished_queue, force_reupload = self.force_reupload)
class StatusUpdater(threading.Thread):
"""This thread is used only to update the BibSched status"""
def __init__(self, total_records, finished_queue):
threading.Thread.__init__(self)
self.total_records = total_records
self.total_finished = 0
self.finished_queue = finished_queue
def run(self):
while self.total_finished != self.total_records:
finished_rec = self.finished_queue.get()
self.total_finished += 1
update_single_status(finished_rec, self.total_finished, self.total_records)
class SingleThreadQueue(object):
"""simple queue implementation for the case of a single processing thread.
Standard queue implementation involves threads anyway"""
def __init__(self):
self.queue = []
self.pointer = 0
def put(self, el):
self.queue.append(el)
def put_nowait(self, el):
self.queue.append(el)
def get_nowait(self):
self.pointer += 1
return self.queue[self.pointer - 1]
def get(self):
self.pointer += 1
return self.queue[self.pointer - 1]
def empty(self):
return self.pointer == len(self.queue)
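# SingleThreadQueue mimics just the subset of the Queue.Queue interface used in this
# module (put, put_nowait, get, get_nowait, empty) without any locking, so the
# single-threaded code path does not pay for the synchronisation that Queue.Queue
# performs.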
def get_number_of_harvesting_threads():
"""Read the task parameters to retrieve the number of concurrent threads\
The default threads number is encoded in the configuration file
"""
if task_has_option("threads_number"):
return int(task_get_option("threads_number"))
return int(CFG_HEPDATA_THREADS_NUM)
def get_forceupload_param():
"""Read the task parameters to retrieve the information if data files should be reuploaded
"""
if task_has_option("force_reupload"):
return bool(task_get_option("force_reupload"))
return False
def hepdata_harvest_task_core():
def kill_handler(signum, frame):
write_message('KILLED')
exit(0)
signal.signal(signal.SIGTERM, kill_handler)
number_threads = get_number_of_harvesting_threads()
force_reupload = get_forceupload_param()
task_stats = {
"new_hepdata_records" : 0,
"inserted_hepdata_datasets" : 0,
"corrected_hepdata_datasets" : 0,
"deleted_hepdata_datasets" : 0
}
if number_threads > 1:
insert_queue = Queue.Queue()
correct_queue = Queue.Queue()
failed_ids = Queue.Queue()
recs_queue = Queue.Queue()
finished_queue = Queue.Queue()
suspend_resume_queue = Queue.Queue()
suspend_wait_queue = Queue.Queue()
main_syn_queue = Queue.Queue()
task_stats["semaphore"] = threading.Semaphore()
else:
insert_queue = SingleThreadQueue()
correct_queue = SingleThreadQueue()
failed_ids = SingleThreadQueue()
recs_queue = SingleThreadQueue()
task_stats["semaphore"] = None
write_message("STAGE0: Harvesting data and building the input")
# feed the input queue
total_recs = 0
for recid in hepdata_harvest_get_identifiers():
recs_queue.put_nowait(recid)
total_recs += 1
# spawn necessary number of workers (try not to spawn more than necessary)
if number_threads > 1:
for i in xrange(number_threads):
suspend_wait_queue.put(1)
ts = [RetrievalWorker(recs_queue, insert_queue, correct_queue, finished_queue, failed_ids, task_stats, suspend_wait_queue, suspend_resume_queue, main_syn_queue, number_threads, force_reupload = force_reupload) for i in xrange(number_threads)]
        update_t = StatusUpdater(total_recs, finished_queue)

# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import itertools
from lxml import etree
from openerp import models, fields, api, _
from openerp.exceptions import except_orm, Warning, RedirectWarning
import openerp.addons.decimal_precision as dp
# mapping invoice type to journal type
TYPE2JOURNAL = {
'out_invoice': 'sale',
'in_invoice': 'purchase',
'out_refund': 'sale_refund',
'in_refund': 'purchase_refund',
}
# mapping invoice type to refund type
TYPE2REFUND = {
'out_invoice': 'out_refund', # Customer Invoice
'in_invoice': 'in_refund', # Supplier Invoice
'out_refund': 'out_invoice', # Customer Refund
'in_refund': 'in_invoice', # Supplier Refund
}
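# Example (sketch): refunding a customer invoice maps TYPE2REFUND['out_invoice'] to
# 'out_refund', and refunding that refund maps back to 'out_invoice'.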
MAGIC_COLUMNS = ('id', 'create_uid', 'create_date', 'write_uid', 'write_date')
class account_invoice(models.Model):
_name = "account.invoice"
_inherit = ['mail.thread']
_description = "Invoice"
_order = "number desc, id desc"
_track = {
'type': {
},
'state': {
'account.mt_invoice_paid': lambda self, cr, uid, obj, ctx=None: obj.state == 'paid' and obj.type in ('out_invoice', 'out_refund'),
'account.mt_invoice_validated': lambda self, cr, uid, obj, ctx=None: obj.state == 'open' and obj.type in ('out_invoice', 'out_refund'),
},
}
@api.one
@api.depends('invoice_line.price_subtotal', 'tax_line.amount')
def _compute_amount(self):
self.amount_untaxed = sum(line.price_subtotal for line in self.invoice_line)
self.amount_tax = sum(line.amount for line in self.tax_line)
self.amount_total = self.amount_untaxed + self.amount_tax
@api.model
def _default_journal(self):
inv_type = self._context.get('type', 'out_invoice')
inv_types = inv_type if isinstance(inv_type, list) else [inv_type]
company_id = self._context.get('company_id', self.env.user.company_id.id)
domain = [
('type', 'in', filter(None, map(TYPE2JOURNAL.get, inv_types))),
('company_id', '=', company_id),
]
return self.env['account.journal'].search(domain, limit=1)
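    # Example (sketch): with a context of {'type': 'in_invoice'} the domain above
    # becomes [('type', 'in', ['purchase']), ('company_id', '=', <company id>)], so
    # the first purchase journal of the invoicing company is proposed by default.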
@api.model
def _default_currency(self):
journal = self._default_journal()
return journal.currency or journal.company_id.currency_id
@api.model
@api.returns('account.analytic.journal')
def _get_journal_analytic(self, inv_type):
""" Return the analytic journal corresponding to the given invoice type. """
journal_type = TYPE2JOURNAL.get(inv_type, 'sale')
journal = self.env['account.analytic.journal'].search([('type', '=', journal_type)], limit=1)
if not journal:
raise except_orm(_('No Analytic Journal!'),
_("You must define an analytic journal of type '%s'!") % (journal_type,))
return journal
@api.one
@api.depends('account_id', 'move_id.line_id.account_id', 'move_id.line_id.reconcile_id')
def _compute_reconciled(self):
self.reconciled = self.test_paid()
if not self.reconciled and self.state == 'paid':
self.signal_workflow('open_test')
@api.model
def _get_reference_type(self):
return [('none', _('Free Reference'))]
@api.one
@api.depends(
'state', 'currency_id', 'invoice_line.price_subtotal',
'move_id.line_id.account_id.type',
'move_id.line_id.amount_residual',
'move_id.line_id.amount_residual_currency',
'move_id.line_id.currency_id',
'move_id.line_id.reconcile_partial_id.line_partial_ids.invoice.type',
)
def _compute_residual(self):
nb_inv_in_partial_rec = max_invoice_id = 0
self.residual = 0.0
for line in self.move_id.line_id:
if line.account_id.type in ('receivable', 'payable'):
if line.currency_id == self.currency_id:
self.residual += line.amount_residual_currency
else:
# ahem, shouldn't we use line.currency_id here?
from_currency = line.company_id.currency_id.with_context(date=line.date)
self.residual += from_currency.compute(line.amount_residual, self.currency_id)
# we check if the invoice is partially reconciled and if there
# are other invoices involved in this partial reconciliation
for pline in line.reconcile_partial_id.line_partial_ids:
if pline.invoice and self.type == pline.invoice.type:
nb_inv_in_partial_rec += 1
# store the max invoice id as for this invoice we will
# make a balance instead of a simple division
max_invoice_id = max(max_invoice_id, pline.invoice.id)
if nb_inv_in_partial_rec:
# if there are several invoices in a partial reconciliation, we
# split the residual by the number of invoices to have a sum of
# residual amounts that matches the partner balance
new_value = self.currency_id.round(self.residual / nb_inv_in_partial_rec)
if self.id == max_invoice_id:
# if it's the last the invoice of the bunch of invoices
# partially reconciled together, we make a balance to avoid
# rounding errors
self.residual = self.residual - ((nb_inv_in_partial_rec - 1) * new_value)
else:
self.residual = new_value
# prevent the residual amount on the invoice to be less than 0
self.residual = max(self.residual, 0.0)
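    # Purely illustrative walk-through of the rounding balance above (numbers are
    # made up): suppose three invoices of the same type share a partial
    # reconciliation and each computes a raw residual of 100.01 with a currency
    # rounding of 0.01. Then new_value = round(100.01 / 3) = 33.34, the two
    # invoices with the lower ids report 33.34 each, and the invoice with the
    # highest id reports 100.01 - 2 * 33.34 = 33.33, so the three residuals add
    # up to exactly 100.01 again.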
@api.one
@api.depends(
'move_id.line_id.account_id',
'move_id.line_id.reconcile_id.line_id',
'move_id.line_id.reconcile_partial_id.line_partial_ids',
)
def _compute_move_lines(self):
# Give Journal Items related to the payment reconciled to this invoice.
# Return partial and total payments related to the selected invoice.
self.move_lines = self.env['account.move.line']
if not self.move_id:
return
data_lines = self.move_id.line_id.filtered(lambda l: l.account_id == self.account_id)
partial_lines = self.env['account.move.line']
for data_line in data_lines:
if data_line.reconcile_id:
lines = data_line.reconcile_id.line_id
elif data_line.reconcile_partial_id:
lines = data_line.reconcile_partial_id.line_partial_ids
else:
                lines = self.env['account.move.line']
partial_lines += data_line
self.move_lines = lines - partial_lines
@api.one
@api.depends(
'move_id.line_id.reconcile_id.line_id',
'move_id.line_id.reconcile_partial_id.line_partial_ids',
)
def _compute_payments(self):
partial_lines = lines = self.env['account.move.line']
for line in self.move_id.line_id:
if line.reconcile_id:
lines |= line.reconcile_id.line_id
elif line.reconcile_partial_id:
lines |= line.reconcile_partial_id.line_partial_ids
partial_lines += line
self.payment_ids = (lines - partial_lines).sorted()
name = fields.Char(string='Reference/Description', index=True,
readonly=True, states={'draft': [('readonly', False)]})
origin = fields.Char(string='Source Document',
help="Reference of the document that produced this invoice.",
readonly=True, states={'draft': [('readonly', False)]})
supplier_invoice_number = fields.Char(string='Supplier Invoice Number',
help="The reference of this invoice as provided by the supplier.",
readonly=True, states={'draft': [('readonly', False)]})
type = fields.Selection([
('out_invoice','Customer Invoice'),
('in_invoice','Supplier Invoice'),
('out_refund','Customer Refund'),
('in_refund','Supplier Refund'),
], string='Type', readonly=True, index=True, change_default=True,
default=lambda self: self._context.get('type', 'out_invoice'),
track_visibility='always')
number = fields.Char(related='move_id.name', store=True, readonly=True, copy=False)
internal_number = fields.Char(string='Invoice Number', readonly=True,
default=False, copy=False,
help="Unique number of the invoice, computed automatically when the invoice is created.")
reference = fields.Char(string='Invoice Reference',
help="The partner reference of this invoice.")
reference_type = fields.Selection('_get_reference_type', string='Payment Reference',
required=True, readonly=True, states={'draft': [('readonly', False)]},
default='none')
comment = fields.Text('Additional Information')
state = fields.Selection([
('draft','Draft'),
('proforma','Pro-forma'),
('proforma2','Pro-forma'),
('open','Open'),
('paid','Paid'),
('cancel','Cancelled'),
], string='Status', index=True, readonly=True, default='draft',
track_visibility='onchange', copy=False,
help=" * The 'Draft' status is used when a user is encoding a new and unconfirmed Invoice.\n"
" * The 'Pro-forma' when invoice is in Pro-forma status,invoice does not have an invoice number.\n"
" * The 'Open' status is used when user create invoice,a invoice number is generated.Its in open status till user does not pay invoice.\n"
" * The 'Paid' status is set automatically when the invoice is paid. Its related journal entries may or may not be reconciled.\n"
" * The 'Cancelled' status is used when user cancel invoice.")
sent = fields.Boolean(readonly=True, default=False, copy=False,
help="It indicates that the invoice has been sent.")
date_invoice = fields.Date(string='Invoice Date',
readonly=True, states={'draft': [('readonly', False)]}, index=True,
help="Keep empty to use the current date", copy=False)
date_due = fields.Date(string='Due Date',
readonly=True, states={'draft': [('readonly', False)]}, index=True, copy=False,
help="If you use payment terms, the due date will be computed automatically at the generation "
"of accounting entries. The payment term may compute several due dates, for example 50% "
"now and 50% in one month, but if you want to force a due date, make sure that the payment "
"term is not set on the invoice. If you keep the payment term and the due date empty, it "
"means direct payment.")
partner_id = fields.Many2one('res.partner', string='Partner', change_default=True,
required=True, readonly=True, states={'draft': [('readonly', False)]},
track_visibility='always')
payment_term = fields.Many2one('account.payment.term', string='Payment Terms',
readonly=True, states={'draft': [('readonly', False)]},
help="If you use payment terms, the due date will be computed automatically at the generation "
"of accounting entries. If you keep the payment term and the due date empty, it means direct payment. "
"The payment term may compute several due dates, for example 50% now, 50% in one month.")
period_id = fields.Many2one('account.period', string='Force Period',
domain=[('state', '!=', 'done')], copy=False,
help="Keep empty to use the period of the validation(invoice) date.",
readonly=True, states={'draft': [('readonly', False)]})
account_id = fields.Many2one('account.account', string='Account',
required=True, readonly=True, states={'draft': [('readonly', False)]},
help="The partner account used for this invoice.")
invoice_line = fields.One2many('account.invoice.line', 'invoice_id', string='Invoice Lines',
readonly=True, states={'draft': [('readonly', False)]}, copy=True)
tax_line = fields.One2many('account.invoice.tax', 'invoice_id', string='Tax Lines',
readonly=True, states={'draft': [('readonly', False)]}, copy=True)
move_id = fields.Many2one('account.move', string='Journal Entry',
readonly=True, index=True, ondelete='restrict', copy=False,
help="Link to the automatically generated Journal Items.")
amount_untaxed = fields.Float(string='Subtotal', digits=dp.get_precision('Account'),
store=True, readonly=True, compute='_compute_amount', track_visibility='always')
amount_tax = fields.Float(string='Tax', digits=dp.get_precision('Account'),
store=True, readonly=True, compute='_compute_amount')
amount_total = fields.Float(string='Total', digits=dp.get_precision('Account'),
store=True, readonly=True, compute='_compute_amount')
currency_id = fields.Many2one('res.currency', string='Currency',
required=True, readonly=True, states={'draft': [('readonly', False)]},
default=_default_currency, track_visibility='always')
journal_id = fields.Many2one('account.journal', string='Journal',
required=True, readonly=True, states={'draft': [('readonly', False)]},
default=_default_journal,
domain="[('type', 'in', {'out_invoice': ['sale'], 'out_refund': ['sale_refund'], 'in_refund': ['purchase_refund'], 'in_invoice': ['purchase']}.get(type, [])), ('company_id', '=', company_id)]")
company_id = fields.Many2one('res.company', string='Company', change_default=True,
required=True, readonly=True, states={'draft': [('readonly', False)]},
default=lambda self: self.env['res.company']._company_default_get('account.invoice'))
check_total = fields.Float(string='Verification Total', digits=dp.get_precision('Account'),
readonly=True, states={'draft': [('readonly', False)]}, default=0.0)
reconciled = fields.Boolean(string='Paid/Reconciled',
store=True, readonly=True, compute='_compute_reconciled',
help="It indicates that the invoice has been paid and the journal entry of the invoice has been reconciled with one or several journal entries of payment.")
partner_bank_id = fields.Many2one('res.partner.bank', string='Bank Account',
help='Bank Account Number to which the invoice will be paid. A Company bank account if this is a Customer Invoice or Supplier Refund, otherwise a Partner bank account number.',
readonly=True, states={'draft': [('readonly', False)]})
move_lines = fields.Many2many('account.move.line', string='Entry Lines',
compute='_compute_move_lines')
residual = fields.Float(string='Balance', digits=dp.get_precision('Account'),
compute='_compute_residual', store=True,
help="Remaining amount due.")
payment_ids = fields.Many2many('account.move.line', string='Payments',
compute='_compute_payments')
move_name = fields.Char(string='Journal Entry', readonly=True,
states={'draft': [('readonly', False)]}, copy=False)
user_id = fields.Many2one('res.users', string='Salesperson', track_visibility='onchange',
readonly=True, states={'draft': [('readonly', False)]},
default=lambda self: self.env.user)
fiscal_position = fields.Many2one('account.fiscal.position', string='Fiscal Position',
readonly=True, states={'draft': [('readonly', False)]})
commercial_partner_id = fields.Many2one('res.partner', string='Commercial Entity',
related='partner_id.commercial_partner_id', store=True, readonly=True,
help="The commercial entity that will be used on Journal Entries for this invoice")
_sql_constraints = [
('number_uniq', 'unique(number, company_id, journal_id, type)',
'Invoice Number must be unique per Company!'),
]
@api.model
def fields_view_get(self, view_id=None, view_type=False, toolbar=False, submenu=False):
context = self._context
if context.get('active_model') == 'res.partner' and context.get('active_ids'):
partner = self.env['res.partner'].browse(context['active_ids'])[0]
if not view_type:
view_id = self.env['ir.ui.view'].search([('name', '=', 'account.invoice.tree')]).id
view_type = 'tree'
elif view_type == 'form':
if partner.supplier and not partner.customer:
view_id = self.env['ir.ui.view'].search([('name', '=', 'account.invoice.supplier.form')]).id
elif partner.customer and not partner.supplier:
view_id = self.env['ir.ui.view'].search([('name', '=', 'account.invoice.form')]).id
res = super(account_invoice, self).fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
# adapt selection of field journal_id
for field in res['fields']:
if field == 'journal_id' and type:
journal_select = self.env['account.journal']._name_search('', [('type', '=', type)], name_get_uid=1)
res['fields'][field]['selection'] = journal_select
doc = etree.XML(res['arch'])
if context.get('type'):
for node in doc.xpath("//field[@name='partner_bank_id']"):
if context['type'] == 'in_refund':
node.set('domain', "[('partner_id.ref_companies', 'in', [company_id])]")
elif context['type'] == 'out_refund':
node.set('domain', "[('partner_id', '=', partner_id)]")
if view_type == 'search':
if context.get('type') in ('out_invoice', 'out_refund'):
for node in doc.xpath("//group[@name='extended filter']"):
doc.remove(node)
if view_type == 'tree':
partner_string = _('Customer')
if context.get('type') in ('in_invoice', 'in_refund'):
partner_string = _('Supplier')
for node in doc.xpath("//field[@name='reference']"):
node.set('invisible', '0')
for node in doc.xpath("//field[@name='partner_id']"):
node.set('string', partner_string)
res['arch'] = etree.tostring(doc)
return res
@api.multi
def invoice_print(self):
""" Print the invoice and mark it as sent, so that we can see more
easily the next step of the workflow
"""
assert len(self) == 1, 'This option should only be used for a single id at a time.'
self.sent = True
return self.env['report'].get_action(self, 'account.report_invoice')
@api.multi
def action_invoice_sent(self):
""" Open a window to compose an email, with the edi invoice template
message loaded by default
"""
assert len(self) == 1, 'This option should only be used for a single id at a time.'
template = self.env.ref('account.email_template_edi_invoice', False)
compose_form = self.env.ref('mail.email_compose_message_wizard_form', False)
ctx = dict(
default_model='account.invoice',
default_res_id=self.id,
default_use_template=bool(template),
default_template_id=template.id,
default_composition_mode='comment',
mark_invoice_as_sent=True,
)
return {
'name': _('Compose Email'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form.id, 'form')],
'view_id': compose_form.id,
'target': 'new',
'context': ctx,
}
@api.multi
def confirm_paid(self):
return self.write({'state': 'paid'})
@api.multi
def unlink(self):
for invoice in self:
if invoice.state not in ('draft', 'cancel'):
raise Warning(_('You cannot delete an invoice which is not draft or cancelled. You should refund it instead.'))
elif invoice.internal_number:
raise Warning(_('You cannot delete an invoice after it has been validated (and received a number). You can set it back to "Draft" state and modify its content, then re-confirm it.'))
return super(account_invoice, self).unlink()
@api.multi
def onchange_partner_id(self, type, partner_id, date_invoice=False,
payment_term=False, partner_bank_id=False, company_id=False):
account_id = False
payment_term_id = False
fiscal_position = False
bank_id = False
if partner_id:
p = self.env['res.partner'].browse(partner_id)
rec_account = p.property_account_receivable
pay_account = p.property_account_payable
if company_id:
if p.property_account_receivable.company_id and \
p.property_account_receivable.company_id.id != company_id and \
p.property_account_payable.company_id and \
p.property_account_payable.company_id.id != company_id:
prop = self.env['ir.property']
rec_dom = [('name', '=', 'property_account_receivable'), ('company_id', '=', company_id)]
pay_dom = [('name', '=', 'property_account_payable'), ('company_id', '=', company_id)]
res_dom = [('res_id', '=', 'res.partner,%s' % partner_id)]
rec_prop = prop.search(rec_dom + res_dom) or prop.search(rec_dom)
pay_prop = prop.search(pay_dom + res_dom) or prop.search(pay_dom)
rec_account = rec_prop.get_by_record(rec_prop)
pay_account = pay_prop.get_by_record(pay_prop)
if not rec_account and not pay_account:
action = self.env.ref('account.action_account_config')
                        msg = _('Cannot find a chart of accounts for this company. You should configure it.\nPlease go to Account Configuration.')
raise RedirectWarning(msg, action.id, _('Go to the configuration panel'))
if type in ('out_invoice', 'out_refund'):
account_id = rec_account.id
payment_term_id = p.property_payment_term.id
else:
account_id = pay_account.id
payment_term_id = p.property_supplier_payment_term.id
fiscal_position = p.property_account_position.id
bank_id = p.bank_ids.id
result = {'value': {
'account_id': account_id,
'payment_term': payment_term_id,
'fiscal_position': fiscal_position,
}}
if type in ('in_invoice', 'in_refund'):
result['value']['partner_bank_id'] = bank_id
if payment_term != payment_term_id:
if payment_term_id:
to_update = self.onchange_payment_term_date_invoice(payment_term_id, date_invoice)
result['value'].update(to_update.get('value', {}))
else:
result['value']['date_due'] = False
if partner_bank_id != bank_id:
to_update = self.onchange_partner_bank(bank_id)
result['value'].update(to_update.get('value', {}))
return result
@api.multi
def onchange_journal_id(self, journal_id=False):
if journal_id:
journal = self.env['account.journal'].browse(journal_id)
return {
'value': {
'currency_id': journal.currency.id or journal.company_id.currency_id.id,
'company_id': journal.company_id.id,
}
}
return {}
@api.multi
def onchange_payment_term_date_invoice(self, payment_term_id, date_invoice):
if not date_invoice:
date_invoice = fields.Date.today()
if not payment_term_id:
            # When no payment term is defined, keep the due date entered by
            # the user, falling back to the invoice date.
return {'value': {'date_due': self.date_due or date_invoice}}
pterm = self.env['account.payment.term'].browse(payment_term_id)
pterm_list = pterm.compute(value=1, date_ref=date_invoice)[0]
if pterm_list:
return {'value': {'date_due': max(line[0] for line in pterm_list)}}
else:
raise except_orm(_('Insufficient Data!'),
_('The payment term of supplier does not have a payment term line.'))
@api.multi
def onchange_invoice_line(self, lines):
return {}
@api.multi
def onchange_partner_bank(self, partner_bank_id=False):
return {'value': {}}
@api.multi
def onchange_company_id(self, company_id, part_id, type, invoice_line, currency_id):
# TODO: add the missing context parameter when forward-porting in trunk
# so we can remove this hack!
self = self.with_context(self.env['res.users'].context_get())
values = {}
domain = {}
if company_id and part_id and type:
p = self.env['res.partner'].browse(part_id)
if p.property_account_payable and p.property_account_receivable and \
p.property_account_payable.company_id.id != company_id and \
p.property_account_receivable.company_id.id != company_id:
prop = self.env['ir.property']
rec_dom = [('name', '=', 'property_account_receivable'), ('company_id', '=', company_id)]
pay_dom = [('name', '=', 'property_account_payable'), ('company_id', '=', company_id)]
res_dom = [('res_id', '=', 'res.partner,%s' % part_id)]
rec_prop = prop.search(rec_dom + res_dom) or prop.search(rec_dom)
pay_prop = prop.search(pay_dom + res_dom) or prop.search(pay_dom)
rec_account = rec_prop.get_by_record(rec_prop)
pay_account = pay_prop.get_by_record(pay_prop)
if not rec_account and not pay_account:
action = self.env.ref('account.action_account_config')
                    msg = _('Cannot find a chart of accounts for this company. You should configure it.\nPlease go to Account Configuration.')
raise RedirectWarning(msg, action.id, _('Go to the configuration panel'))
if type in ('out_invoice', 'out_refund'):
acc_id = rec_account.id
else:
acc_id = pay_account.id
values= {'account_id': acc_id}
if self:
if company_id:
for line in self.invoice_line:
if not line.account_id:
continue
if line.account_id.company_id.id == company_id:
continue
accounts = self.env['account.account'].search([('name', '=', line.account_id.name), ('company_id', '=', company_id)])
if not accounts:
action = self.env.ref('account.action_account_config')
                        msg = _('Cannot find a chart of accounts for this company. You should configure it.\nPlease go to Account Configuration.')
raise RedirectWarning(msg, action.id, _('Go to the configuration panel'))
line.write({'account_id': accounts[-1].id})
else:
for line_cmd in invoice_line or []:
if len(line_cmd) >= 3 and isinstance(line_cmd[2], dict):
line = self.env['account.account'].browse(line_cmd[2]['account_id'])
if line.company_id.id != company_id:
raise except_orm(
_('Configuration Error!'),
_("Invoice line account's company and invoice's company does not match.")
)
if company_id and type:
journal_type = TYPE2JOURNAL[type]
journals = self.env['account.journal'].search([('type', '=', journal_type), ('company_id', '=', company_id)])
if journals:
values['journal_id'] = journals[0].id
journal_defaults = self.env['ir.values'].get_defaults_dict('account.invoice', 'type=%s' % type)
if 'journal_id' in journal_defaults:
values['journal_id'] = journal_defaults['journal_id']
if not values.get('journal_id'):
                field_desc = journals.fields_get(['type'])
                type_label = next(t for t, label in field_desc['type']['selection'] if t == journal_type)
                action = self.env.ref('account.action_account_journal_form')
                msg = _('Cannot find any account journal of type "%s" for this company. You should create one.\nPlease go to Journal Configuration.') % type_label
raise RedirectWarning(msg, action.id, _('Go to the configuration panel'))
domain = {'journal_id': [('id', 'in', journals.ids)]}
return {'value': values, 'domain': domain}
@api.multi
def action_cancel_draft(self):
# go from canceled state to draft state
self.write({'state': 'draft'})
self.delete_workflow()
self.create_workflow()
return True
@api.one
@api.returns('ir.ui.view')
def get_formview_id(self):
""" Update form view id of action to open the invoice """
if self.type == 'in_invoice':
return self.env.ref('account.invoice_supplier_form')
else:
return self.env.ref('account.invoice_form')
@api.multi
def move_line_id_payment_get(self):
# return the move line ids with the same account as the invoice self
if not self.id:
return []
query = """ SELECT l.id
FROM account_move_line l, account_invoice i
WHERE i.id = %s AND l.move_id = i.move_id AND l.account_id = i.account_id
"""
self._cr.execute(query, (self.id,))
return [row[0] for row in self._cr.fetchall()]
@api.multi
def test_paid(self):
# check whether all corresponding account move lines are reconciled
line_ids = self.move_line_id_payment_get()
if not line_ids:
return False
query = "SELECT reconcile_id FROM account_move_line WHERE id IN %s"
self._cr.execute(query, (tuple(line_ids),))
return all(row[0] for row in self._cr.fetchall())
@api.multi
def button_reset_taxes(self):
account_invoice_tax = self.env['account.invoice.tax']
ctx = dict(self._context)
for invoice in self:
self._cr.execute("DELETE FROM account_invoice_tax WHERE invoice_id=%s AND manual is False", (invoice.id,))
self.invalidate_cache()
partner = invoice.partner_id
if partner.lang:
ctx['lang'] = partner.lang
for taxe in account_invoice_tax.compute(invoice).values():
account_invoice_tax.create(taxe)
# dummy write on self to trigger recomputations
return self.with_context(ctx).write({'invoice_line': []})
@api.multi
def button_compute(self, set_total=False):
self.button_reset_taxes()
for invoice in self:
if set_total:
invoice.check_total = invoice.amount_total
return True
@staticmethod
def _convert_ref(ref):
return (ref or '').replace('/','')
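    # Illustrative examples of the helper above (values are made up):
    #     _convert_ref('SAJ/2014/0001')  ->  'SAJ20140001'
    #     _convert_ref(False)            ->  ''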
@api.multi
def _get_analytic_lines(self):
""" Return a list of dict for creating analytic lines for self[0] """
company_currency = self.company_id.currency_id
sign = 1 if self.type in ('out_invoice', 'in_refund') else -1
iml = self.env['account.invoice.line'].move_line_get(self.id)
for il in iml:
if il['account_analytic_id']:
if self.type in ('in_invoice', 'in_refund'):
ref = self.reference
else:
ref = self._convert_ref(self.number)
if not self.journal_id.analytic_journal_id:
raise except_orm(_('No Analytic Journal!'),
_("You have to define an analytic journal on the '%s' journal!") % (self.journal_id.name,))
currency = self.currency_id.with_context(date=self.date_invoice)
il['analytic_lines'] = [(0,0, {
'name': il['name'],
'date': self.date_invoice,
'account_id': il['account_analytic_id'],
'unit_amount': il['quantity'],
'amount': currency.compute(il['price'], company_currency) * sign,
'product_id': il['product_id'],
'product_uom_id': il['uos_id'],
'general_account_id': il['account_id'],
'journal_id': self.journal_id.analytic_journal_id.id,
'ref': ref,
})]
return iml
@api.multi
def action_date_assign(self):
for inv in self:
res = inv.onchange_payment_term_date_invoice(inv.payment_term.id, inv.date_invoice)
if res and res.get('value'):
inv.write(res['value'])
return True
@api.multi
def finalize_invoice_move_lines(self, move_lines):
""" finalize_invoice_move_lines(move_lines) -> move_lines
Hook method to be overridden in additional modules to verify and
possibly alter the move lines to be created by an invoice, for
special cases.
:param move_lines: list of dictionaries with the account.move.lines (as for create())
:return: the (possibly updated) final move_lines to create for this invoice
"""
return move_lines
@api.multi
def check_tax_lines(self, compute_taxes):
account_invoice_tax = self.env['account.invoice.tax']
company_currency = self.company_id.currency_id
if not self.tax_line:
for tax in compute_taxes.values():
account_invoice_tax.create(tax)
else:
tax_key = []
for tax in self.tax_line:
if tax.manual:
continue
key = (tax.tax_code_id.id, tax.base_code_id.id, tax.account_id.id)
tax_key.append(key)
if key not in compute_taxes:
raise except_orm(_('Warning!'), _('Global taxes defined, but they are not in invoice lines !'))
base = compute_taxes[key]['base']
if abs(base - tax.base) > company_currency.rounding:
raise except_orm(_('Warning!'), _('Tax base different!\nClick on compute to update the tax base.'))
for key in compute_taxes:
if key not in tax_key:
raise except_orm(_('Warning!'), _('Taxes are missing!\nClick on compute button.'))
@api.multi
def compute_invoice_totals(self, company_currency, ref, invoice_move_lines):
total = 0
total_currency = 0
for line in invoice_move_lines:
if self.currency_id != company_currency:
currency = self.currency_id.with_context(date=self.date_invoice or fields.Date.today())
line['currency_id'] = currency.id
line['amount_currency'] = line['price']
line['price'] = currency.compute(line['price'], company_currency)
else:
line['currency_id'] = False
line['amount_currency'] = False
line['ref'] = ref
if self.type in ('out_invoice','in_refund'):
total += line['price']
total_currency += line['amount_currency'] or line['price']
line['price'] = - line['price']
else:
total -= line['price']
total_currency -= line['amount_currency'] or line['price']
return total, total_currency, invoice_move_lines
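    # Illustrative sign walk-through (numbers are made up): for an out_invoice
    # with a single move line of price 100.0, the loop above adds +100.0 to
    # `total` and flips the line's price to -100.0, which line_get_convert()
    # later turns into a credit; the counterpart 'dest' line built in
    # action_move_create() then carries the +100.0 as a debit on the receivable
    # account. For an in_invoice the signs are mirrored: total becomes -100.0
    # while the line keeps its +100.0 price and becomes a debit.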
def inv_line_characteristic_hashcode(self, invoice_line):
"""Overridable hashcode generation for invoice lines. Lines having the same hashcode
will be grouped together if the journal has the 'group line' option. Of course a module
can add fields to invoice lines that would need to be tested too before merging lines
or not."""
return "%s-%s-%s-%s-%s" % (
invoice_line['account_id'],
invoice_line.get('tax_code_id', 'False'),
invoice_line.get('product_id', 'False'),
invoice_line.get('analytic_account_id', 'False'),
invoice_line.get('date_maturity', 'False'),
)
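    # Illustrative example (field values are made up): for a move-line dict such
    # as {'account_id': 42, 'product_id': 7, 'date_maturity': False} the method
    # above returns "42-False-7-False-False", so two lines sharing account, tax
    # code, product, analytic account and maturity date fall into the same group
    # when the journal groups invoice lines.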
def group_lines(self, iml, line):
"""Merge account move lines (and hence analytic lines) if invoice line hashcodes are equals"""
if self.journal_id.group_invoice_lines:
line2 = {}
for x, y, l in line:
tmp = self.inv_line_characteristic_hashcode(l)
if tmp in line2:
am = line2[tmp]['debit'] - line2[tmp]['credit'] + (l['debit'] - l['credit'])
line2[tmp]['debit'] = (am > 0) and am or 0.0
line2[tmp]['credit'] = (am < 0) and -am or 0.0
line2[tmp]['tax_amount'] += l['tax_amount']
line2[tmp]['analytic_lines'] += l['analytic_lines']
else:
line2[tmp] = l
line = []
for key, val in line2.items():
line.append((0,0,val))
return line
@api.multi
def action_move_create(self):
""" Creates invoice related analytics and financial move lines """
account_invoice_tax = self.env['account.invoice.tax']
account_move = self.env['account.move']
for inv in self:
if not inv.journal_id.sequence_id:
raise except_orm(_('Error!'), _('Please define sequence on the journal related to this invoice.'))
if not inv.invoice_line:
raise except_orm(_('No Invoice Lines!'), _('Please create some invoice lines.'))
if inv.move_id:
continue
ctx = dict(self._context, lang=inv.partner_id.lang)
if not inv.date_invoice:
inv.with_context(ctx).write({'date_invoice': fields.Date.context_today(self)})
date_invoice = inv.date_invoice
company_currency = inv.company_id.currency_id
# create the analytical lines, one move line per invoice line
iml = inv._get_analytic_lines()
# check if taxes are all computed
compute_taxes = account_invoice_tax.compute(inv)
inv.check_tax_lines(compute_taxes)
# I disabled the check_total feature
group_check_total = self.env.ref('account.group_supplier_inv_check_total')
if self.env.user in group_check_total.users:
if inv.type in ('in_invoice', 'in_refund') and abs(inv.check_total - inv.amount_total) >= (inv.currency_id.rounding / 2.0):
raise except_orm(_('Bad Total!'), _('Please verify the price of the invoice!\nThe encoded total does not match the computed total.'))
if inv.payment_term:
total_fixed = total_percent = 0
for line in inv.payment_term.line_ids:
if line.value == 'fixed':
total_fixed += line.value_amount
if line.value == 'procent':
total_percent += line.value_amount
total_fixed = (total_fixed * 100) / (inv.amount_total or 1.0)
if (total_fixed + total_percent) > 100:
raise except_orm(_('Error!'), _("Cannot create the invoice.\nThe related payment term is probably misconfigured as it gives a computed amount greater than the total invoiced amount. In order to avoid rounding issues, the latest line of your payment term must be of type 'balance'."))
# one move line per tax line
iml += account_invoice_tax.move_line_get(inv.id)
if inv.type in ('in_invoice', 'in_refund'):
ref = inv.reference
else:
ref = self._convert_ref(inv.number)
diff_currency = inv.currency_id != company_currency
# create one move line for the total and possibly adjust the other lines amount
total, total_currency, iml = inv.with_context(ctx).compute_invoice_totals(company_currency, ref, iml)
name = inv.name or inv.supplier_invoice_number or '/'
totlines = []
if inv.payment_term:
totlines = inv.with_context(ctx).payment_term.compute(total, date_invoice)[0]
if totlines:
res_amount_currency = total_currency
ctx['date'] = date_invoice
for i, t in enumerate(totlines):
if inv.currency_id != company_currency:
amount_currency = company_currency.with_context(ctx).compute(t[1], inv.currency_id)
else:
amount_currency = False
# last line: add the diff
res_amount_currency -= amount_currency or 0
if i + 1 == len(totlines):
amount_currency += res_amount_currency
iml.append({
'type': 'dest',
'name': name,
'price': t[1],
'account_id': inv.account_id.id,
'date_maturity': t[0],
'amount_currency': diff_currency and amount_currency,
'currency_id': diff_currency and inv.currency_id.id,
'ref': ref,
})
else:
iml.append({
'type': 'dest',
'name': name,
'price': total,
'account_id': inv.account_id.id,
'date_maturity': inv.date_due,
'amount_currency': diff_currency and total_currency,
'currency_id': diff_currency and inv.currency_id.id,
'ref': ref
})
date = date_invoice
part = self.env['res.partner']._find_accounting_partner(inv.partner_id)
line = [(0, 0, self.line_get_convert(l, part.id, date)) for l in iml]
line = inv.group_lines(iml, line)
journal = inv.journal_id.with_context(ctx)
if journal.centralisation:
raise except_orm(_('User Error!'),
_('You cannot create an invoice on a centralized journal. Uncheck the centralized counterpart box in the related journal from the configuration menu.'))
line = inv.finalize_invoice_move_lines(line)
move_vals = {
'ref': inv.reference or inv.name,
'line_id': line,
'journal_id': journal.id,
'date': date,
'narration': inv.comment,
'company_id': inv.company_id.id,
}
ctx['company_id'] = inv.company_id.id
period = inv.period_id
if not period:
period = period.with_context(ctx).find(date_invoice)[:1]
if period:
move_vals['period_id'] = period.id
for i in line:
i[2]['period_id'] = period.id
ctx['invoice'] = inv
move = account_move.with_context(ctx).create(move_vals)
# make the invoice point to that move
vals = {
'move_id': move.id,
'period_id': period.id,
'move_name': move.name,
}
inv.with_context(ctx).write(vals)
# Pass invoice in context in method post: used if you want to get the same
# account move reference when creating the same invoice after a cancelled one:
move.post()
self._log_event()
return True
@api.multi
def invoice_validate(self):
return self.write({'state': 'open'})
@api.model
def line_get_convert(self, line, part, date):
return {
'date_maturity': line.get('date_maturity', False),
'partner_id': part,
'name': line['name'][:64],
'date': date,
'debit': line['price']>0 and line['price'],
'credit': line['price']<0 and -line['price'],
'account_id': line['account_id'],
'analytic_lines': line.get('analytic_lines', []),
'amount_currency': line['price']>0 and abs(line.get('amount_currency', False)) or -abs(line.get('amount_currency', False)),
'currency_id': line.get('currency_id', False),
'tax_code_id': line.get('tax_code_id', False),
'tax_amount': line.get('tax_amount', False),
'ref': line.get('ref', False),
'quantity': line.get('quantity',1.00),
'product_id': line.get('product_id', False),
'product_uom_id': line.get('uos_id', False),
'analytic_account_id': line.get('account_analytic_id', False),
}
@api.multi
def action_number(self):
        #TODO: not a proper fix, but a write is currently required to refresh the values before reading them.
self.write({})
for inv in self:
self.write({'internal_number': inv.number})
if inv.type in ('in_invoice', 'in_refund'):
if not inv.reference:
ref = self._convert_ref(inv.number)
else:
ref = inv.reference
else:
ref = self._convert_ref(inv.number)
self._cr.execute(""" UPDATE account_move SET ref=%s
WHERE id=%s AND (ref IS NULL OR ref = '')""",
(ref, inv.move_id.id))
self._cr.execute(""" UPDATE account_move_line SET ref=%s
WHERE move_id=%s AND (ref IS NULL OR ref = '')""",
(ref, inv.move_id.id))
self._cr.execute(""" UPDATE account_analytic_line SET ref=%s
FROM account_move_line
WHERE account_move_line.move_id = %s AND
account_analytic_line.move_id = account_move_line.id""",
(ref, inv.move_id.id))
self.invalidate_cache()
return True
@api.multi
def action_cancel(self):
moves = self.env['account.move']
for inv in self:
if inv.move_id:
moves += inv.move_id
if inv.payment_ids:
for move_line in inv.payment_ids:
if move_line.reconcile_partial_id.line_partial_ids:
raise except_orm(_('Error!'), _('You cannot cancel an invoice which is partially paid. You need to unreconcile related payment entries first.'))
# First, set the invoices as cancelled and detach the move ids
self.write({'state': 'cancel', 'move_id': False})
if moves:
# second, invalidate the move(s)
moves.button_cancel()
# delete the move this invoice was pointing to
# Note that the corresponding move_lines and move_reconciles
# will be automatically deleted too
moves.unlink()
self._log_event(-1.0, 'Cancel Invoice')
return True
###################
@api.multi
def _log_event(self, factor=1.0, name='Open Invoice'):
#TODO: implement messages system
return True
@api.multi
def name_get(self):
TYPES = {
'out_invoice': _('Invoice'),
'in_invoice': _('Supplier Invoice'),
'out_refund': _('Refund'),
'in_refund': _('Supplier Refund'),
}
result = []
for inv in self:
result.append((inv.id, "%s %s" % (inv.number or TYPES[inv.type], inv.name or '')))
return result
@api.model
def name_search(self, name, args=None, operator='ilike', limit=100):
args = args or []
recs = self.browse()
if name:
recs = self.search([('number', '=', name)] + args, limit=limit)
if not recs:
recs = self.search([('name', operator, name)] + args, limit=limit)
return recs.name_get()
@api.model
def _refund_cleanup_lines(self, lines):
""" Convert records to dict of values suitable for one2many line creation
:param recordset lines: records to convert
        :return: list of command tuples for one2many line creation [(0, 0, dict of values), ...]
"""
result = []
for line in lines:
values = {}
for name, field in line._fields.iteritems():
if name in MAGIC_COLUMNS:
continue
elif field.type == 'many2one':
values[name] = line[name].id
elif field.type not in ['many2many', 'one2many']:
values[name] = line[name]
elif name == 'invoice_line_tax_id':
values[name] = [(6, 0, line[name].ids)]
result.append((0, 0, values))
return result
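    # Illustrative example of the commands produced above (field names and values
    # are made up): a refunded invoice line could come back as
    #     (0, 0, {'name': 'Service', 'quantity': 1.0, 'price_unit': 100.0,
    #             'account_id': 42, 'product_id': 7,
    #             'invoice_line_tax_id': [(6, 0, [3])]})
    # i.e. a "create" command for the one2many field, with the many2many taxes
    # re-linked through a (6, 0, ids) command.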
@api.model
def _prepare_refund(self, invoice, date=None, period_id=None, description=None, journal_id=None):
""" Prepare the dict of values to create the new refund from the invoice.
This method may be overridden to implement custom
refund generation (making sure to call super() to establish
a clean extension chain).
:param record invoice: invoice to refund
:param string date: refund creation date from the wizard
:param integer period_id: force account.period from the wizard
:param string description: description of the refund from the wizard
:param integer journal_id: account.journal from the wizard
:return: dict of value to create() the refund
"""
values = {}
for field in ['name', 'reference', 'comment', 'date_due', 'partner_id', 'company_id',
'account_id', 'currency_id', 'payment_term', 'user_id', 'fiscal_position']:
if invoice._fields[field].type == 'many2one':
values[field] = invoice[field].id
else:
values[field] = invoice[field] or False
values['invoice_line'] = self._refund_cleanup_lines(invoice.invoice_line)
tax_lines = filter(lambda l: l.manual, invoice.tax_line)
values['tax_line'] = self._refund_cleanup_lines(tax_lines)
if journal_id:
journal = self.env['account.journal'].browse(journal_id)
elif invoice['type'] == 'in_invoice':
journal = self.env['account.journal'].search([('type', '=', 'purchase_refund')], limit=1)
else:
journal = self.env['account.journal'].search([('type', '=', 'sale_refund')], limit=1)
values['journal_id'] = journal.id
values['type'] = TYPE2REFUND[invoice['type']]
values['date_invoice'] = date or fields.Date.today()
values['state'] = 'draft'
values['number'] = False
if period_id:
values['period_id'] = period_id
if description:
values['name'] = description
return values
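    # Minimal sketch (hypothetical custom module, not part of this file) of how
    # _prepare_refund can be extended while keeping the extension chain clean, as
    # the docstring above suggests:
    #
    #     class my_account_invoice(models.Model):
    #         _inherit = 'account.invoice'
    #
    #         @api.model
    #         def _prepare_refund(self, invoice, date=None, period_id=None,
    #                             description=None, journal_id=None):
    #             values = super(my_account_invoice, self)._prepare_refund(
    #                 invoice, date=date, period_id=period_id,
    #                 description=description, journal_id=journal_id)
    #             # e.g. carry the original invoice number over as the origin
    #             values['origin'] = invoice.number or invoice.origin
    #             return values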
@api.multi
@api.returns('self')
def refund(self, date=None, period_id=None, description=None, journal_id=None):
new_invoices = self.browse()
for invoice in self:
# create the new invoice
values = self._prepare_refund(invoice, date=date, period_id=period_id,
description=description, journal_id=journal_id)
new_invoices += self.create(values)
return new_invoices
@api.v8
def pay_and_reconcile(self, pay_amount, pay_account_id, period_id, pay_journal_id,
writeoff_acc_id, writeoff_period_id, writeoff_journal_id, name=''):
# TODO check if we can use different period for payment and the writeoff line
assert len(self)==1, "Can only pay one invoice at a time."
# Take the seq as name for move
SIGN = {'out_invoice': -1, 'in_invoice': 1, 'out_refund': 1, 'in_refund': -1}
direction = SIGN[self.type]
# take the chosen date
date = self._context.get('date_p') or fields.Date.today()
# Take the amount in currency and the currency of the payment
if self._context.get('amount_currency') and self._context.get('currency_id'):
amount_currency = self._context['amount_currency']
currency_id = self._context['currency_id']
else:
amount_currency = False
currency_id = False
pay_journal = self.env['account.journal'].browse(pay_journal_id)
if self.type in ('in_invoice', 'in_refund'):
ref = self.reference
else:
ref = self._convert_ref(self.number)
partner = self.partner_id._find_accounting_partner(self.partner_id)
name = name or self.invoice_line.name or self.number
# Pay attention to the sign for both debit/credit AND amount_currency
l1 = {
'name': name,
'debit': direction * pay_amount > 0 and direction * pay_amount,
'credit': direction * pay_amount < 0 and -direction * pay_amount,
'account_id': self.account_id.id,
'partner_id': partner.id,
'ref': ref,
'date': date,
'currency_id': currency_id,
'amount_currency': direction * (amount_currency or 0.0),
'company_id': self.company_id.id,
}
l2 = {
'name': name,
'debit': direction * pay_amount < 0 and -direction * pay_amount,
'credit': direction * pay_amount > 0 and direction * pay_amount,
'account_id': pay_account_id,
'partner_id': partner.id,
'ref': ref,
'date': date,
'currency_id': currency_id,
'amount_currency': -direction * (amount_currency or 0.0),
'company_id': self.company_id.id,
}
move = self.env['account.move'].create({
'ref': ref,
'line_id': [(0, 0, l1), (0, 0, l2)],
'journal_id': pay_journal_id,
'period_id': period_id,
'date': date,
})
move_ids = (move | self.move_id).ids
self._cr.execute("SELECT id FROM account_move_line WHERE move_id IN %s",
(tuple(move_ids),))
lines = self.env['account.move.line'].browse([r[0] for r in self._cr.fetchall()])
lines2rec = lines.browse()
total = 0.0
for line in itertools.chain(lines, self.payment_ids):
if line.account_id == self.account_id:
lines2rec += line
total += (line.debit or 0.0) - (line.credit or 0.0)
inv_id, name = self.name_get()[0]
if not round(total, self.env['decimal.precision'].precision_get('Account')) or writeoff_acc_id:
lines2rec.reconcile('manual', writeoff_acc_id, writeoff_period_id, writeoff_journal_id)
else:
code = self.currency_id.symbol
# TODO: use currency's formatting function
msg = _("Invoice partially paid: %s%s of %s%s (%s%s remaining).") % \
(pay_amount, code, self.amount_total, code, total, code)
self.message_post(body=msg)
lines2rec.reconcile_partial('manual')
# Update the stored value (fields.function), so we write to trigger recompute
return self.write({})
@api.v7
def pay_and_reconcile(self, cr, uid, ids, pay_amount, pay_account_id, period_id, pay_journal_id,
writeoff_acc_id, writeoff_period_id, writeoff_journal_id, context=None, name=''):
recs = self.browse(cr, uid, ids, context)
return recs.pay_and_reconcile(pay_amount, pay_account_id, period_id, pay_journal_id,
writeoff_acc_id, writeoff_period_id, writeoff_journal_id, name=name)
class account_invoice_line(models.Model):
_name = "account.invoice.line"
_description = "Invoice Line"
_order = "invoice_id,sequence,id"
@api.one
@api.depends('price_unit', 'discount', 'invoice_line_tax_id', 'quantity',
'product_id', 'invoice_id.partner_id', 'invoice_id.currency_id')
def _compute_price(self):
price = self.price_unit * (1 - (self.discount or 0.0) / 100.0)
taxes = self.invoice_line_tax_id.compute_all(price, self.quantity, product=self.product_id, partner=self.invoice_id.partner_id)
self.price_subtotal = taxes['total']
if self.invoice_id:
self.price_subtotal = self.invoice_id.currency_id.round(self.price_subtotal)
@api.model
def _default_price_unit(self):
if not self._context.get('check_total'):
return 0
total = self._context['check_total']
for l in self._context.get('invoice_line', []):
if isinstance(l, (list, tuple)) and len(l) >= 3 and l[2]:
vals = l[2]
price = vals.get('price_unit', 0) * (1 - vals.get('discount', 0) / 100.0)
total = total - (price * vals.get('quantity'))
taxes = vals.get('invoice_line_tax_id')
if taxes and len(taxes[0]) >= 3 and taxes[0][2]:
taxes = self.env['account.tax'].browse(taxes[0][2])
tax_res = taxes.compute_all(price, vals.get('quantity'),
product=vals.get('product_id'), partner=self._context.get('partner_id'))
for tax in tax_res['taxes']:
total = total - tax['amount']
return total
@api.model
def _default_account(self):
# XXX this gets the default account for the user's company,
# it should get the default account for the invoice's company
# however, the invoice's company does not reach this point
if self._context.get('type') in ('out_invoice', 'out_refund'):
return self.env['ir.property'].get('property_account_income_categ', 'product.category')
else:
return self.env['ir.property'].get('property_account_expense_categ', 'product.category')
name = fields.Text(string='Description', required=True)
origin = fields.Char(string='Source Document',
help="Reference of the document that produced this invoice.")
sequence = fields.Integer(string='Sequence', default=10,
help="Gives the sequence of this line when displaying the invoice.")
invoice_id = fields.Many2one('account.invoice', string='Invoice Reference',
ondelete='cascade', index=True)
uos_id = fields.Many2one('product.uom', string='Unit of Measure',
ondelete='set null', index=True)
product_id = fields.Many2one('product.product', string='Product',
ondelete='set null', index=True)
account_id = fields.Many2one('account.account', string='Account',
required=True, domain=[('type', 'not in', ['view', 'closed'])],
default=_default_account,
help="The income or expense account related to the selected product.")
price_unit = fields.Float(string='Unit Price', required=True,
        digits=dp.get_precision('Product Price'),

# encoding: utf-8
"""
Common implementation of ID, Population, PopulationView and Assembly classes.
These base classes should be sub-classed by the backend-specific classes.
:copyright: Copyright 2006-2016 by the PyNN team, see AUTHORS.
:license: CeCILL, see LICENSE for details.
"""
import numpy
import logging
import operator
from itertools import chain
try:
basestring
reduce
except NameError:
basestring = str
from functools import reduce
from collections import defaultdict
from pyNN import random, recording, errors, standardmodels, core, space, descriptions
from pyNN.models import BaseCellType
from pyNN.parameters import ParameterSpace, LazyArray, simplify as simplify_parameter_array
from pyNN.recording import files
deprecated = core.deprecated
logger = logging.getLogger("PyNN")
def is_conductance(target_cell):
"""
Returns True if the target cell uses conductance-based synapses, False if
it uses current-based synapses, and None if the synapse-basis cannot be
determined.
"""
if hasattr(target_cell, 'local') and target_cell.local and hasattr(target_cell, 'celltype'):
is_conductance = target_cell.celltype.conductance_based
else:
is_conductance = None
return is_conductance
class IDMixin(object):
"""
Instead of storing ids as integers, we store them as ID objects,
which allows a syntax like:
p[3,4].tau_m = 20.0
where p is a Population object.
"""
# Simulator ID classes should inherit both from the base type of the ID
# (e.g., int or long) and from IDMixin.
def __getattr__(self, name):
if name == "parent":
raise Exception("parent is not set")
elif name == "set":
errmsg = "For individual cells, set values using the parameter name directly, " \
"e.g. population[0].tau_m = 20.0, or use 'set' on a population view, " \
"e.g. population[0:1].set(tau_m=20.0)"
raise AttributeError(errmsg)
try:
val = self.get_parameters()[name]
except KeyError:
raise errors.NonExistentParameterError(name,
self.celltype.__class__.__name__,
self.celltype.get_parameter_names())
return val
def __setattr__(self, name, value):
if name == "parent":
object.__setattr__(self, name, value)
elif self.celltype.has_parameter(name):
self.set_parameters(**{name: value})
else:
object.__setattr__(self, name, value)
def set_parameters(self, **parameters):
"""
Set cell parameters, given as a sequence of parameter=value arguments.
"""
# if some of the parameters are computed from the values of other
# parameters, need to get and translate all parameters
if self.local:
self.as_view().set(**parameters)
else:
raise errors.NotLocalError("Cannot set parameters for a cell that does not exist on this node.")
def get_parameters(self):
"""Return a dict of all cell parameters."""
if self.local:
parameter_names = self.celltype.get_parameter_names()
return dict((k, v) for k, v in zip(parameter_names, self.as_view().get(parameter_names)))
else:
raise errors.NotLocalError("Cannot obtain parameters for a cell that does not exist on this node.")
@property
def celltype(self):
return self.parent.celltype
@property
def is_standard_cell(self):
return isinstance(self.celltype, standardmodels.StandardCellType)
def _set_position(self, pos):
"""
Set the cell position in 3D space.
Cell positions are stored in an array in the parent Population.
"""
assert isinstance(pos, (tuple, numpy.ndarray))
assert len(pos) == 3
self.parent._set_cell_position(self, pos)
def _get_position(self):
"""
Return the cell position in 3D space.
Cell positions are stored in an array in the parent Population, if any,
or within the ID object otherwise. Positions are generated the first
time they are requested and then cached.
"""
return self.parent._get_cell_position(self)
position = property(_get_position, _set_position)
@property
def local(self):
return self.parent.is_local(self)
def inject(self, current_source):
"""Inject current from a current source object into the cell."""
current_source.inject_into([self])
def get_initial_value(self, variable):
"""Get the initial value of a state variable of the cell."""
return self.parent._get_cell_initial_value(self, variable)
def set_initial_value(self, variable, value):
"""Set the initial value of a state variable of the cell."""
self.parent._set_cell_initial_value(self, variable, value)
def as_view(self):
"""Return a PopulationView containing just this cell."""
index = self.parent.id_to_index(self)
return self.parent[index:index + 1]
class BasePopulation(object):
_record_filter = None
def __getitem__(self, index):
"""
Return either a single cell (ID object) from the Population, if `index`
is an integer, or a subset of the cells (PopulationView object), if
`index` is a slice or array.
Note that __getitem__ is called when using [] access, e.g.
p = Population(...)
p[2] is equivalent to p.__getitem__(2).
p[3:6] is equivalent to p.__getitem__(slice(3, 6))
"""
if isinstance(index, (int, numpy.integer)):
return self.all_cells[index]
elif isinstance(index, (slice, list, numpy.ndarray)):
return self._get_view(index)
elif isinstance(index, tuple):
return self._get_view(list(index))
else:
raise TypeError("indices must be integers, slices, lists, arrays or tuples, not %s" % type(index).__name__)
def __len__(self):
"""Return the total number of cells in the population (all nodes)."""
return self.size
@property
def local_size(self):
"""Return the number of cells in the population on the local MPI node"""
return len(self.local_cells) # would self._mask_local.sum() be faster?
def __iter__(self):
"""Iterator over cell ids on the local node."""
return iter(self.local_cells)
@property
def conductance_based(self):
"""
Indicates whether the post-synaptic response is modelled as a change
in conductance or a change in current.
"""
return self.celltype.conductance_based
@property
def receptor_types(self):
return self.celltype.receptor_types
def is_local(self, id):
"""
Indicates whether the cell with the given ID exists on the local MPI node.
"""
assert id.parent is self
index = self.id_to_index(id)
return self._mask_local[index]
def all(self):
"""Iterator over cell ids on all MPI nodes."""
return iter(self.all_cells)
def __add__(self, other):
"""
A Population/PopulationView can be added to another Population,
PopulationView or Assembly, returning an Assembly.
"""
assert isinstance(other, BasePopulation)
return self._assembly_class(self, other)
def _get_cell_position(self, id):
index = self.id_to_index(id)
return self.positions[:, index]
def _set_cell_position(self, id, pos):
index = self.id_to_index(id)
self.positions[:, index] = pos
@property
def position_generator(self): # "generator" is a misleading name, has no yield statement
def gen(i):
return self.positions.T[i]
return gen
def _get_cell_initial_value(self, id, variable):
assert isinstance(self.initial_values[variable], LazyArray)
index = self.id_to_local_index(id)
return self.initial_values[variable][index]
def _set_cell_initial_value(self, id, variable, value):
assert isinstance(self.initial_values[variable], LazyArray)
index = self.id_to_local_index(id)
self.initial_values[variable][index] = value
def nearest(self, position):
"""Return the neuron closest to the specified position."""
# doesn't always work correctly if a position is equidistant between
# two neurons, i.e. 0.5 should be rounded up, but it isn't always.
# also doesn't take account of periodic boundary conditions
pos = numpy.array([position] * self.positions.shape[1]).transpose()
dist_arr = (self.positions - pos)**2
distances = dist_arr.sum(axis=0)
nearest = distances.argmin()
return self[nearest]
def sample(self, n, rng=None):
"""
Randomly sample `n` cells from the Population, and return a
PopulationView object.
"""
assert isinstance(n, int)
if not rng:
rng = random.NumpyRNG()
indices = rng.permutation(numpy.arange(len(self), dtype=numpy.int))[0:n]
logger.debug("The %d cells selected have indices %s" % (n, indices))
logger.debug("%s.sample(%s)", self.label, n)
return self._get_view(indices)
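    # Usage sketch (the population/variable names are illustrative, not part of
    # this module): draw a reproducible subset of 10 cells and record their
    # membrane potential:
    #     rng = random.NumpyRNG(seed=42)
    #     subset = population.sample(10, rng=rng)
    #     subset.record('v')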
def get(self, parameter_names, gather=False, simplify=True):
"""
Get the values of the given parameters for every local cell in the
population, or, if gather=True, for all cells in the population.
Values will be expressed in the standard PyNN units (i.e. millivolts,
nanoamps, milliseconds, microsiemens, nanofarads, event per second).
"""
# if all the cells have the same value for a parameter, should
# we return just the number, rather than an array?
if isinstance(parameter_names, basestring):
parameter_names = (parameter_names,)
return_list = False
else:
return_list = True
if isinstance(self.celltype, standardmodels.StandardCellType):
if any(name in self.celltype.computed_parameters() for name in parameter_names):
native_names = self.celltype.get_native_names() # need all parameters in order to calculate values
else:
native_names = self.celltype.get_native_names(*parameter_names)
native_parameter_space = self._get_parameters(*native_names)
parameter_space = self.celltype.reverse_translate(native_parameter_space)
else:
parameter_space = self._get_parameters(*self.celltype.get_parameter_names())
parameter_space.evaluate(simplify=simplify) # what if parameter space is homogeneous on some nodes but not on others?
# this also causes problems if the population size matches the number of MPI nodes
parameters = dict(parameter_space.items())
if gather == True and self._simulator.state.num_processes > 1:
# seems inefficient to do it in a loop - should do as single operation
for name in parameter_names:
values = parameters[name]
if isinstance(values, numpy.ndarray):
all_values = {self._simulator.state.mpi_rank: values.tolist()}
local_indices = numpy.arange(self.size)[self._mask_local].tolist()
all_indices = {self._simulator.state.mpi_rank: local_indices}
all_values = recording.gather_dict(all_values)
all_indices = recording.gather_dict(all_indices)
if self._simulator.state.mpi_rank == 0:
values = reduce(operator.add, all_values.values())
indices = reduce(operator.add, all_indices.values())
idx = numpy.argsort(indices)
values = numpy.array(values)[idx]
parameters[name] = values
try:
values = [parameters[name] for name in parameter_names]
except KeyError as err:
raise errors.NonExistentParameterError("%s. Valid parameters for %s are: %s" % (
err, self.celltype, self.celltype.get_parameter_names()))
if return_list:
return values
else:
assert len(parameter_names) == 1
return values[0]
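    # Illustrative return shapes (the numbers are made up):
    #     p.get('tau_m')              -> 20.0 if homogeneous, otherwise an array
    #                                    of per-cell values
    #     p.get(['tau_m', 'v_rest'])  -> [20.0, -65.0]
    #     p.get('tau_m', gather=True) -> gathers values from all MPI nodes; the
    #                                    complete array is returned on rank 0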
def set(self, **parameters):
"""
Set one or more parameters for every cell in the population.
Values passed to set() may be:
(1) single values
(2) RandomDistribution objects
(3) lists/arrays of values of the same size as the population
(4) mapping functions, where a mapping function accepts a single
argument (the cell index) and returns a single value.
Here, a "single value" may be either a single number or a list/array of
numbers (e.g. for spike times). Values should be expressed in the
standard PyNN units (i.e. millivolts, nanoamps, milliseconds,
microsiemens, nanofarads, event per second).
Examples::
p.set(tau_m=20.0, v_rest=-65).
p.set(spike_times=[0.3, 0.7, 0.9, 1.4])
p.set(cm=rand_distr, tau_m=lambda i: 10 + i/10.0)
"""
# TODO: add example using of function of (x,y,z) and Population.position_generator
if self.local_size > 0:
if (isinstance(self.celltype, standardmodels.StandardCellType)
and any(name in self.celltype.computed_parameters() for name in parameters)):
# need to get existing parameter space of models so we can perform calculations
native_names = self.celltype.get_native_names()
parameter_space = self.celltype.reverse_translate(self._get_parameters(*native_names))
if self.local_size != self.size:
parameter_space.expand((self.size,), self._mask_local)
parameter_space.update(**parameters)
else:
parameter_space = ParameterSpace(parameters,
self.celltype.get_schema(),
(self.size,),
self.celltype.__class__)
if isinstance(self.celltype, standardmodels.StandardCellType):
parameter_space = self.celltype.translate(parameter_space)
            assert parameter_space.shape == (self.size,), "{} != {}".format(parameter_space.shape, (self.size,))
self._set_parameters(parameter_space)
@deprecated("set(parametername=value_array)")
def tset(self, parametername, value_array):
"""
'Topographic' set. Set the value of parametername to the values in
value_array, which must have the same dimensions as the Population.
"""
self.set(**{parametername: value_array})
@deprecated("set(parametername=rand_distr)")
def rset(self, parametername, rand_distr):
"""
'Random' set. Set the value of parametername to a value taken from
rand_distr, which should be a RandomDistribution object.
"""
# Note that we generate enough random numbers for all cells on all nodes
# but use only those relevant to this node. This ensures that the
# sequence of random numbers does not depend on the number of nodes,
# provided that the same rng with the same seed is used on each node.
self.set(**{parametername: rand_distr})
def initialize(self, **initial_values):
"""
Set initial values of state variables, e.g. the membrane potential.
Values passed to initialize() may be:
(1) single numeric values (all neurons set to the same value)
(2) RandomDistribution objects
(3) lists/arrays of numbers of the same size as the population
(4) mapping functions, where a mapping function accepts a single
argument (the cell index) and returns a single number.
Values should be expressed in the standard PyNN units (i.e. millivolts,
        nanoamps, milliseconds, microsiemens, nanofarads, events per second).
Examples::
p.initialize(v=-70.0)
p.initialize(v=rand_distr, gsyn_exc=0.0)
p.initialize(v=lambda i: -65 + i/10.0)
"""
for variable, value in initial_values.items():
logger.debug("In Population '%s', initialising %s to %s" % (self.label, variable, value))
initial_value = LazyArray(value, shape=(self.size,), dtype=float)
self._set_initial_value_array(variable, initial_value)
self.initial_values[variable] = initial_value
def find_units(self, variable):
return self.celltype.units[variable]
def can_record(self, variable):
"""Determine whether `variable` can be recorded from this population."""
return self.celltype.can_record(variable)
def record(self, variables, to_file=None, sampling_interval=None):
"""
Record the specified variable or variables for all cells in the
Population or view.
`variables` may be either a single variable name or a list of variable
names. For a given celltype class, `celltype.recordable` contains a list of
variables that can be recorded for that celltype.
If specified, `to_file` should be a Neo IO instance and `write_data()`
will be automatically called when `end()` is called.
`sampling_interval` should be a value in milliseconds, and an integer
multiple of the simulation timestep.
"""
if variables is None: # reset the list of things to record
# note that if record(None) is called on a view of a population
# recording will be reset for the entire population, not just the view
self.recorder.reset()
else:
logger.debug("%s.record('%s')", self.label, variables)
if self._record_filter is None:
self.recorder.record(variables, self.all_cells, sampling_interval)
else:
self.recorder.record(variables, self._record_filter, sampling_interval)
if isinstance(to_file, basestring):
self.recorder.file = to_file
@deprecated("record('v')")
def record_v(self, to_file=True):
"""
Record the membrane potential for all cells in the Population.
"""
self.record('v', to_file)
@deprecated("record(['gsyn_exc', 'gsyn_inh'])")
def record_gsyn(self, to_file=True):
"""
Record synaptic conductances for all cells in the Population.
"""
self.record(['gsyn_exc', 'gsyn_inh'], to_file)
def write_data(self, io, variables='all', gather=True, clear=False, annotations=None):
"""
Write recorded data to file, using one of the file formats supported by
Neo.
`io`:
a Neo IO instance
`variables`:
either a single variable name or a list of variable names.
Variables must have been previously recorded, otherwise an
Exception will be raised.
For parallel simulators, if `gather` is True, all data will be gathered
to the master node and a single output file created there. Otherwise, a
file will be written on each node, containing only data from the cells
simulated on that node.
If `clear` is True, recorded data will be deleted from the `Population`.
`annotations` should be a dict containing simple data types such as
numbers and strings. The contents will be written into the output data
file as metadata.
"""
logger.debug("Population %s is writing %s to %s [gather=%s, clear=%s]" % (self.label, variables, io, gather, clear))
self.recorder.write(variables, io, gather, self._record_filter, clear=clear,
annotations=annotations)
def get_data(self, variables='all', gather=True, clear=False):
"""
Return a Neo `Block` containing the data (spikes, state variables)
recorded from the Population.
`variables` - either a single variable name or a list of variable names
Variables must have been previously recorded, otherwise an
Exception will be raised.
For parallel simulators, if `gather` is True, all data will be gathered
to all nodes and the Neo `Block` will contain data from all nodes.
Otherwise, the Neo `Block` will contain only data from the cells
simulated on the local node.
If `clear` is True, recorded data will be deleted from the `Population`.
"""
return self.recorder.get(variables, gather, self._record_filter, clear)
@deprecated("write_data(file, 'spikes')")
def printSpikes(self, file, gather=True, compatible_output=True):
self.write_data(file, 'spikes', gather)
@deprecated("get_data('spikes')")
def getSpikes(self, gather=True, compatible_output=True):
return self.get_data('spikes', gather)
@deprecated("write_data(file, 'v')")
def print_v(self, file, gather=True, compatible_output=True):
self.write_data(file, 'v', gather)
@deprecated("get_data('v')")
def get_v(self, gather=True, compatible_output=True):
return self.get_data('v', gather)
@deprecated("write_data(file, ['gsyn_exc', 'gsyn_inh'])")
def print_gsyn(self, file, gather=True, compatible_output=True):
self.write_data(file, ['gsyn_exc', 'gsyn_inh'], gather)
@deprecated("get_data(['gsyn_exc', 'gsyn_inh'])")
def get_gsyn(self, gather=True, compatible_output=True):
return self.get_data(['gsyn_exc', 'gsyn_inh'], gather)
def get_spike_counts(self, gather=True):
"""
Returns a dict containing the number of spikes for each neuron.
The dict keys are neuron IDs, not indices.
"""
# arguably, we should use indices
return self.recorder.count('spikes', gather, self._record_filter)
@deprecated("mean_spike_count()")
def meanSpikeCount(self, gather=True):
return self.mean_spike_count(gather)
def mean_spike_count(self, gather=True):
"""
Returns the mean number of spikes per neuron.
"""
spike_counts = self.get_spike_counts(gather)
total_spikes = sum(spike_counts.values())
if self._simulator.state.mpi_rank == 0 or not gather: # should maybe use allgather, and get the numbers on all nodes
if len(spike_counts) > 0:
return float(total_spikes) / len(spike_counts)
else:
return 0
else:
return numpy.nan
def inject(self, current_source):
"""
Connect a current source to all cells in the Population.
"""
if not self.celltype.injectable:
raise TypeError("Can't inject current into a spike source.")
current_source.inject_into(self)
# name should be consistent with saving/writing data, i.e. save_data() and save_positions() or write_data() and write_positions()
def save_positions(self, file):
"""
Save positions to file. The output format is ``index x y z``
"""
if isinstance(file, basestring):
file = recording.files.StandardTextFile(file, mode='w')
cells = self.all_cells
result = numpy.empty((len(cells), 4))
result[:, 0] = numpy.array([self.id_to_index(id) for id in cells])
result[:, 1:4] = self.positions.T
if self._simulator.state.mpi_rank == 0:
file.write(result, {'population': self.label})
file.close()
class Population(BasePopulation):
"""
A group of neurons all of the same type. "Population" is used as a generic
term intended to include layers, columns, nuclei, etc., of cells.
Arguments:
`size`:
number of cells in the Population. For backwards-compatibility,
`size` may also be a tuple giving the dimensions of a grid,
e.g. ``size=(10,10)`` is equivalent to ``size=100`` with ``structure=Grid2D()``.
`cellclass`:
a cell type (a class inheriting from :class:`pyNN.models.BaseCellType`).
`cellparams`:
a dict, or other mapping, containing parameters, which is passed to
the neuron model constructor.
`structure`:
a :class:`pyNN.space.Structure` instance, used to specify the
positions of neurons in space.
`initial_values`:
a dict, or other mapping, containing initial values for the neuron
state variables.
`label`:
a name for the population. One will be auto-generated if this is not
supplied.
"""
_nPop = 0
def __init__(self, size, cellclass, cellparams=None, structure=None,
initial_values={}, label=None):
"""
Create a population of neurons all of the same type.
"""
if not hasattr(self, "_simulator"):
errmsg = "`common.Population` should not be instantiated directly. " \
"You should import Population from a PyNN backend module, " \
"e.g. pyNN.nest or pyNN.neuron"
raise Exception(errmsg)
if not isinstance(size, int): # also allow a single integer, for a 1D population
assert isinstance(size, tuple), "`size` must be an integer or a tuple of ints. You have supplied a %s" % type(size)
# check the things inside are ints
for e in size:
assert isinstance(e, int), "`size` must be an integer or a tuple of ints. Element '%s' is not an int" % str(e)
assert structure is None, "If you specify `size` as a tuple you may not specify structure."
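            # Derive a Structure from the grid shape. Grid2D/Grid3D are parameterised
            # by aspect ratios (nx/ny, nx/nz) rather than by absolute dimensions.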
if len(size) == 1:
structure = space.Line()
elif len(size) == 2:
nx, ny = size
structure = space.Grid2D(nx / float(ny))
elif len(size) == 3:
nx, ny, nz = size
structure = space.Grid3D(nx / float(ny), nx / float(nz))
else:
raise Exception("A maximum of 3 dimensions is allowed. What do you think this is, string theory?")
size = int(reduce(operator.mul, size)) # NEST doesn't like numpy.int, so to be safe we cast to Python int
self.size = size
self.label = label or 'population%d' % Population._nPop
self._structure = structure or space.Line()
self._positions = None
self._is_sorted = True
if isinstance(cellclass, BaseCellType):
self.celltype = cellclass
assert cellparams is None # cellparams being retained for backwards compatibility, but use is deprecated
elif issubclass(cellclass, BaseCellType):
self.celltype = cellclass(**cellparams)
# emit deprecation warning
else:
raise TypeError("cellclass must be an instance or subclass of BaseCellType, not a %s" % type(cellclass))
self.annotations = {}
self.recorder = self._recorder_class(self)
# Build the arrays of cell ids
# Cells on the local node are represented as ID objects, other cells by integers
# All are stored in a single numpy array for easy lookup by address
# The local cells are also stored in a list, for easy iteration
self._create_cells()
self.first_id = self.all_cells[0]
self.last_id = self.all_cells[-1]
self.initial_values = {}
all_initial_values = self.celltype.default_initial_values.copy()
all_initial_values.update(initial_values)
self.initialize(**all_initial_values)
Population._nPop += 1
def __repr__(self):
return "Population(%d, %r, structure=%r, label=%r)" % (self.size, self.celltype, self.structure, self.label)
@property
def local_cells(self):
"""
An array containing cell ids for the local node.
"""
return self.all_cells[self._mask_local]
def id_to_index(self, id):
"""
Given the ID(s) of cell(s) in the Population, return its (their) index
(order in the Population).
>>> assert p.id_to_index(p[5]) == 5
"""
if not numpy.iterable(id):
if not self.first_id <= id <= self.last_id:
raise ValueError("id should be in the range [%d,%d], actually %d" % (self.first_id, self.last_id, id))
return int(id - self.first_id) # this assumes ids are consecutive
else:
if isinstance(id, PopulationView):
id = id.all_cells
id = numpy.array(id)
if (self.first_id > id.min()) or (self.last_id < id.max()):
raise ValueError("ids should be in the range [%d,%d], actually [%d, %d]" % (self.first_id, self.last_id, id.min(), id.max()))
return (id - self.first_id).astype(numpy.int) # this assumes ids are consecutive
def id_to_local_index(self, id):
"""
Given the ID(s) of cell(s) in the Population, return its (their) index
(order in the Population), counting only cells on the local MPI node.
"""
if self._simulator.state.num_processes > 1:
return self.local_cells.tolist().index(id) # probably very slow
#return numpy.nonzero(self.local_cells == id)[0][0] # possibly faster?
# another idea - get global index, use idx-sum(mask_local[:idx])?
else:
return self.id_to_index(id)
def _get_structure(self):
"""The spatial structure of the Population."""
return self._structure
def _set_structure(self, structure):
assert isinstance(structure, space.BaseStructure)
if self._structure is None or structure != self._structure:
self._positions = None # setting a new structure invalidates previously calculated positions
self._structure = structure
structure = property(fget=_get_structure, fset=_set_structure)
# arguably structure should be read-only, i.e. it is not possible to change it after Population creation
def _get_positions(self):
"""
Try to return self._positions. If it does not exist, create it and then
return it.
"""
if self._positions is None:
self._positions = self.structure.generate_positions(self.size)
assert self._positions.shape == (3, self.size)
return self._positions
def _set_positions(self, pos_array):
assert isinstance(pos_array, numpy.ndarray)
assert pos_array.shape == (3, self.size), "%s != %s" % (pos_array.shape, (3, self.size))
self._positions = pos_array.copy() # take a copy in case pos_array is changed later
self._structure = None # explicitly setting positions destroys any previous structure
positions = property(_get_positions, _set_positions,
doc="""A 3xN array (where N is the number of neurons in the Population)
giving the x,y,z coordinates of all the neurons (soma, in the
case of non-point models).""")
def annotate(self, **annotations):
self.annotations.update(annotations)
def describe(self, template='population_default.txt', engine='default'):
"""
Returns a human-readable description of the population.
The output may be customized by specifying a different template
together with an associated template engine (see :mod:`pyNN.descriptions`).
If template is None, then a dictionary containing the template context
will be returned.
"""
context = {
"label": self.label,
"celltype": self.celltype.describe(template=None),
"structure": None,
"size": self.size,
"size_local": len(self.local_cells),
"first_id": self.first_id,
"last_id": self.last_id,
}
context.update(self.annotations)
if len(self.local_cells) > 0:
first_id = self.local_cells[0]
context.update({
"local_first_id": first_id,
"cell_parameters": {} # first_id.get_parameters(),
})
if self.structure:
context["structure"] = self.structure.describe(template=None)
return descriptions.render(engine, template, context)
class PopulationView(BasePopulation):
"""
A view of a subset of neurons within a Population.
In most ways, Populations and PopulationViews have the same behaviour, i.e.
they can be recorded, connected with Projections, etc. It should be noted
that any changes to neurons in a PopulationView will be reflected in the
parent Population and vice versa.
It is possible to have views of views.
Arguments:
selector:
a slice or numpy mask array. The mask array should either be a
boolean array of the same size as the parent, or an integer array
containing cell indices, i.e. if p.size == 5::
PopulationView(p, array([False, False, True, False, True]))
PopulationView(p, array([2,4]))
PopulationView(p, slice(2,5,2))
will all create the same view.
"""
def __init__(self, parent, selector, label=None):
"""
Create a view of a subset of neurons within a parent Population or
PopulationView.
"""
if not hasattr(self, "_simulator"):
errmsg = "`common.PopulationView` should not be instantiated directly. " \
"You should import PopulationView from a PyNN backend module, " \
"e.g. pyNN.nest or pyNN.neuron"
raise Exception(errmsg)
self.parent = parent
self.mask = selector # later we can have fancier selectors, for now we just have numpy masks
# maybe just redefine __getattr__ instead of the following...
self.celltype = self.parent.celltype
        # If the mask is a slice, IDs will be consecutive without duplication.
# If not, then we need to remove duplicated IDs
if not isinstance(self.mask, slice):
if isinstance(self.mask, list):
self.mask = numpy.array(self.mask)
if self.mask.dtype is numpy.dtype('bool'):
if len(self.mask) != len(self.parent):
                    raise Exception("Boolean masks should have the same size as the parent Population")
self.mask = numpy.arange(len(self.parent))[self.mask]
if len(numpy.unique(self.mask)) != len(self.mask):
                logging.warning("A PopulationView can only contain each ID once; duplicate IDs are removed")
self.mask = numpy.unique(self.mask)
self.all_cells = self.parent.all_cells[self.mask] # do we need to ensure this is ordered?
idx = numpy.argsort(self.all_cells)
self._is_sorted = numpy.all(idx == numpy.arange(len(self.all_cells)))
self.size = len(self.all_cells)
self.label = label or "view of '%s' with size %s" % (parent.label, self.size)
self._mask_local = self.parent._mask_local[self.mask]
self.local_cells = self.all_cells[self._mask_local]
        self.first_id = numpy.min(self.all_cells)  # all_cells may not be sorted, so use min()/max() rather than indexing
self.last_id = numpy.max(self.all_cells)
self.recorder = self.parent.recorder
self._record_filter = self.all_cells
def __repr__(self):
return "PopulationView(parent=%r, selector=%r, label=%r)" % (self.parent, self.mask, self.label)
@property
def initial_values(self):
# this is going to be complex - if we keep initial_values as a dict,
# need to return a dict-like object that takes account of self.mask
raise NotImplementedError
@property
def structure(self):
"""The spatial structure of the parent Population."""
return self.parent.structure
# should we allow setting structure for a PopulationView? Maybe if the
# parent has some kind of CompositeStructure?
@property
def positions(self):
return self.parent.positions.T[self.mask].T # make positions N,3 instead of 3,N to avoid all this transposing?
def id_to_index(self, id):
"""
Given the ID(s) of cell(s) in the PopulationView, return its/their
index/indices (order in the PopulationView).
>>> assert pv.id_to_index(pv[3]) == 3
"""
if not numpy.iterable(id):
if self._is_sorted:
if id not in self.all_cells:
raise IndexError("ID %s not present in the View" % id)
return numpy.searchsorted(self.all_cells, id)
else:
result = numpy.where(self.all_cells == id)[0]
if len(result) == 0:
raise IndexError("ID %s not present in the View" % id)
else:
return result
else:
if self._is_sorted:
return numpy.searchsorted(self.all_cells, id)
else:
result = numpy.array([], dtype=numpy.int)
for item in id:
data = numpy.where(self.all_cells == item)[0]
if len(data) == 0:
raise IndexError("ID %s not present in the View" % item)
elif len(data) > 1:
raise Exception("ID %s is duplicated in the View" % item)
else:
result = numpy.append(result, data)
return result
@property
def grandparent(self):
"""
Returns the parent Population at the root of the tree (since the
immediate parent may itself be a PopulationView).
The name "grandparent" is of course a little misleading, as it could
be just the parent, or the great, great, great, ..., grandparent.
"""
if hasattr(self.parent, "parent"):
return self.parent.grandparent
else:
return self.parent
def index_in_grandparent(self, indices):
"""
Given an array of indices, return the indices in the parent population
at the root of the tree.
"""
indices_in_parent = numpy.arange(self.parent.size)[self.mask][indices]
if hasattr(self.parent, "parent"):
return self.parent.index_in_grandparent(indices_in_parent)
else:
return indices_in_parent
def describe(self, template='populationview_default.txt', engine='default'):
"""
Returns a human-readable description of the population view.
The output may be customized by specifying a different template
        together with an associated template engine (see ``pyNN.descriptions``).
If template is None, then a dictionary containing the template context
will be returned.
"""
context = {"label": self.label,
"parent": self.parent.label,
"mask": self.mask,
"size": self.size}
return descriptions.render(engine, template, context)
class Assembly(object):
"""
    A group of neurons, which may be heterogeneous, in contrast to a Population where
all the neurons are of the same type.
Arguments:
populations:
Populations or PopulationViews
kwargs:
May contain a keyword argument 'label'
"""
_count = 0
def __init__(self, *populations, **kwargs):
"""
Create an Assembly of Populations and/or PopulationViews.
"""
if not hasattr(self, "_simulator"):
errmsg = "`common.Assembly` should not be instantiated directly. " \
"You should import Assembly from a PyNN backend module, " \
"e.g. pyNN.nest or pyNN.neuron"
raise Exception(errmsg)
if kwargs:
assert list(kwargs.keys()) == ['label']
self.populations = []
for p in populations:
self._insert(p)
self.label = kwargs.get('label', 'assembly%d' % Assembly._count)
assert isinstance(self.label, basestring), "label must be a string or unicode"
Assembly._count += 1
def __repr__(self):
return "Assembly(*%r, label=%r)" % (self.populations, self.label)
def _insert(self, element):
if not isinstance(element, BasePopulation):
raise TypeError("argument is a %s, not a Population." % type(element).__name__)
if isinstance(element, PopulationView):
if not element.parent in self.populations:
double = False
for p in self.populations:
data = numpy.concatenate((p.all_cells, element.all_cells))
if len(numpy.unique(data)) != len(p.all_cells) + len(element.all_cells):
                        logging.warning('Adding a PopulationView whose cells are already present in the Assembly is not possible')
double = True # Should we automatically remove duplicated IDs ?
break
if not double:
self.populations.append(element)
else:
                logging.warning('Adding a PopulationView to an Assembly that already contains its parent Population is not possible')
elif isinstance(element, BasePopulation):
if not element in self.populations:
self.populations.append(element)
else:
logging.warning('Adding a Population twice in an Assembly is not possible')
@property
def local_cells(self):
result = self.populations[0].local_cells
for p in self.populations[1:]:
result = numpy.concatenate((result, p.local_cells))
return result
@property
def all_cells(self):
result = self.populations[0].all_cells
for p in self.populations[1:]:
result = numpy.concatenate((result, p.all_cells))
return result
def all(self):
"""Iterator over cell ids on all nodes."""
return iter(self.all_cells)
@property
def _is_sorted(self):
idx = numpy.argsort(self.all_cells)
return numpy.all(idx == numpy.arange(len(self.all_cells)))
@property
def _homogeneous_synapses(self):
cb = [p.celltype.conductance_based for p in self.populations]
return all(cb) or not any(cb)
@property
def conductance_based(self):
"""
`True` if the post-synaptic response is modelled as a change
in conductance, `False` if a change in current.
"""
return all(p.celltype.conductance_based for p in self.populations)
@property
def receptor_types(self):
"""
Return a list of receptor types that are common to all populations
within the assembly.
"""
rts = set(self.populations[0].celltype.receptor_types)
if len(self.populations) > 1:
for p in self.populations[1:]:
rts = rts.intersection(set(p.celltype.receptor_types))
return rts
def find_units(self, variable):
units = set(p.find_units(variable) for p in self.populations)
if len(units) > 1:
raise ValueError("Inconsistent units")
        return units.pop()
@property
def _mask_local(self):
result = self.populations[0]._mask_local
for p in self.populations[1:]:
result = numpy.concatenate((result, p._mask_local))
return result
@property
def first_id(self):
return numpy.min(self.all_cells)
@property
def last_id(self):
return numpy.max(self.all_cells)
def id_to_index(self, id):
"""
Given the ID(s) of cell(s) in the Assembly, return its (their) index
(order in the Assembly)::
>>> assert p.id_to_index(p[5]) == 5
>>> assert p.id_to_index(p.index([1, 2, 3])) == [1, 2, 3]
"""
all_cells = self.all_cells
if not numpy.iterable(id):
if self._is_sorted:
return numpy.searchsorted(all_cells, id)
else:
result = numpy.where(all_cells == id)[0]
if len(result) == 0:
                    raise IndexError("ID %s not present in the Assembly" % id)
else:
return result
else:
if self._is_sorted:
return numpy.searchsorted(all_cells, id)
else:
result = numpy.array([], dtype=numpy.int)
for item in id:
data = numpy.where(all_cells == item)[0]
if len(data) == 0:
raise IndexError("ID %s not present in the Assembly" % item)
elif len(data) > 1:
raise Exception("ID %s is duplicated in the Assembly" % item)
else:
result = numpy.append(result, data)
return result
@property
def positions(self):
result = self.populations[0].positions
for p in self.populations[1:]:
result = numpy.hstack((result, p.positions))
return result
@property
def size(self):
return sum(p.size for p in self.populations)
def __iter__(self):
"""
Iterator over cells in all populations within the Assembly, for cells
on the local MPI node.
"""
iterators = [iter(p) for p in self.populations]
return chain(*iterators)
def __len__(self):
"""Return the total number of cells in the population (all nodes)."""
return self.size
def __getitem__(self, index):
"""
Where `index` is an integer, return an ID.
Where `index` is a slice, tuple, list or numpy array, return a new Assembly
consisting of appropriate populations and (possibly newly created)
population views.
"""
        count = 0
        boundaries = [0]
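        # Build cumulative size boundaries of the member populations so that a flat
        # Assembly index can be mapped to (population, local index) via searchsorted.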
for p in self.populations:
count += p.size
boundaries.append(count)
boundaries = numpy.array(boundaries, dtype=numpy.int)
if isinstance(index, (int, numpy.integer)): # return an ID
pindex = boundaries[1:].searchsorted(index, side='right')
return self.populations[pindex][index - boundaries[pindex]]
elif isinstance(index, (slice, tuple, list, numpy.ndarray)):
if isinstance(index, slice):
indices = numpy.arange(self.size)[index]
else:
indices = numpy.array(index)
pindices = boundaries[1:].searchsorted(indices, side='right')
views = [self.populations[i][indices[pindices == i] - boundaries[i]] for i in numpy.unique(pindices)]
return self.__class__(*views)
else:
raise TypeError("indices must be integers, slices, lists, arrays, not %s" % type(index).__name__)
def __add__(self, other):
"""
An Assembly may be added to a Population, PopulationView or Assembly
with the '+' operator, returning a new Assembly, e.g.::
a2 = a1 + p
"""
if isinstance(other, BasePopulation):
return self.__class__(*(self.populations + [other]))
elif isinstance(other, Assembly):
return self.__class__(*(self.populations + other.populations))
else:
raise TypeError("can only add a Population or another Assembly to an Assembly")
def __iadd__(self, other):
"""
A Population, PopulationView or Assembly may be added to an existing
Assembly using the '+=' operator, e.g.::
a += p
"""
if isinstance(other, BasePopulation):
self._insert(other)
elif isinstance(other, Assembly):
for p in other.populations:
self._insert(p)
else:
raise TypeError("can only add a Population or another Assembly to an Assembly")
return self
def sample(self, n, rng=None):
"""
        Randomly sample `n` cells from the Assembly, and return an Assembly
object.
"""
assert isinstance(n, int)
if not rng:
rng = random.NumpyRNG()
indices = rng.permutation(numpy.arange(len(self), dtype=numpy.int))[0:n]
        logger.debug("The %d cells sampled have indices %s" % (n, indices))
logger.debug("%s.sample(%s)", self.label, n)
return self[indices]
def initialize(self, **initial_values):
"""
Set the initial values of the state variables of the neurons in
this assembly.
"""
for p in self.populations:
p.initialize(**initial_values)
def get(self, parameter_names, gather=False, simplify=True):
"""
Get the values of the given parameters for every local cell in the
Assembly, or, if gather=True, for all cells in the Assembly.
"""
if isinstance(parameter_names, basestring):
parameter_names = (parameter_names,)
return_list = False
else:
return_list = True
parameters = defaultdict(list)
for p in self.populations:
population_values = p.get(parameter_names, gather, simplify=False)
for name, arr in zip(parameter_names, population_values):
parameters[name].append(arr)
for name, value_list in parameters.items():
parameters[name] = numpy.hstack(value_list)
if simplify:
parameters[name] = simplify_parameter_array(parameters[name])
values = [parameters[name] for name in parameter_names]
if return_list:
return values
else:
assert len(parameter_names) == 1
return values[0]
def set(self, **parameters):
"""
Set one or more parameters for every cell in the Assembly.
Values passed to set() may be:
(1) single values
(2) RandomDistribution objects
(3) mapping functions, where a mapping function accepts a single
argument (the cell index) and returns a single value.
Here, a "single value" may be either a single number or a list/array of
numbers (e.g. for spike times).
"""
for p in self.populations:
p.set(**parameters)
@deprecated("set(parametername=rand_distr)")
def rset(self, parametername, rand_distr):
        self.set(**{parametername: rand_distr})
def record(self, variables, to_file=None, sampling_interval=None):
"""
Record the specified variable or variables for all cells in the Assembly.
`variables` may be either a single variable name or a list of variable
names. For a given celltype class, `celltype.recordable` contains a list of
variables that can be recorded for that celltype.
If specified, `to_file` should be a Neo IO instance and `write_data()`
will be automatically called when `end()` is called.
"""
for p in self.populations:
p.record(variables, to_file, sampling_interval)
@deprecated("record('v')")
def record_v(self, to_file=True):
"""Record the membrane potential from all cells in the Assembly."""
self.record('v', to_file)
@deprecated("record(['gsyn_exc', 'gsyn_inh'])")
def record_gsyn(self, to_file=True):
"""Record synaptic conductances from all cells in the Assembly."""
self.record(['gsyn_exc', 'gsyn_inh'], to_file)
def get_population(self, label):
"""
Return the Population/PopulationView from within the Assembly that has
the given label. If no such Population exists, raise KeyError.
"""
for p in self.populations:
if label == p.label:
return p
raise KeyError("Assembly does not contain a population with the label %s" % label)
def save_positions(self, file):
"""
Save positions to file. The output format is id x y z
"""
if isinstance(file, basestring):
file = files.StandardTextFile(file, mode='w')
cells = self.all_cells
result = numpy.empty((len(cells), 4))
result[:, 0] = numpy.array([self.id_to_index(id) for id in cells])
result[:, 1:4] = self.positions.T
if self._simulator.state.mpi_rank == 0:
file.write(result, {'assembly': self.label})
file.close()
@property
def position_generator(self):
def gen(i):
return self.positions[:, i]
return gen
def get_data(self, variables='all', gather=True, clear=False, annotations=None):
"""
Return a Neo `Block` containing the data (spikes, state variables)
recorded from the Assembly.
`variables` - either a single variable name or a list of variable names
Variables must have been previously recorded, otherwise an
Exception will be raised.
For parallel simulators, if `gather` is True, all data will be gathered
to all nodes and the Neo `Block` will contain data from all nodes.
Otherwise, the Neo `Block` will contain only data from the cells
simulated on the local node.
If `clear` is True, recorded data will be deleted from the `Assembly`.
"""
name = self.label
description = self.describe()
blocks = [p.get_data(variables, gather, clear) for p in self.populations]
# adjust channel_ids to match assembly channel indices
offset = 0
for block, p in zip(blocks, self.populations):
for segment in block.segments:
for signal_array in segment.analogsignals:
signal_array.channel_index.channel_ids += offset
offset += p.size
for i, block in enumerate(blocks):
logger.debug("%d: %s", i, block.name)
for j, segment in enumerate(block.segments):
logger.debug(" %d: %s", j, segment.name)
for arr in segment.analogsignals:
logger.debug(" %s %s", arr.shape, arr.name)
merged_block = blocks[0]
for block in blocks[1:]:
merged_block.merge(block)
merged_block.name = name
merged_block.description = description
if annotations:
merged_block.annotate(**annotations)
return merged_block
@deprecated("get_data('spikes')")
def getSpikes(self, gather=True, compatible_output=True):
return self.get_data('spikes', gather)
@deprecated("get_data('v')")
def get_v(self, gather=True, compatible_output=True):
return self.get_data('v', gather)
@deprecated("get_data(['gsyn_exc', 'gsyn_inh'])")
def get_gsyn(self, gather=True, compatible_output=True):
return self.get_data(['gsyn_exc', 'gsyn_inh'], gather)
def mean_spike_count(self, gather=True):
"""
Returns the mean number of spikes per neuron.
"""
        spike_counts = self.get_spike_counts(gather)
total_spikes = sum(spike_counts.values())
if self._simulator.state.mpi_rank == 0 or not gather: # should maybe use allgather, and get the numbers on all nodes
return float(total_spikes) / len(spike_counts)
else:
return numpy.nan
def get_spike_counts(self, gather=True):
"""
Returns the number of spikes for each neuron.
"""
try:
spike_counts = self.populations[0].recorder.count('spikes', gather, self.populations[0]._record_filter)
except errors.NothingToWriteError:
spike_counts = {}
for p in self.populations[1:]:
try:
spike_counts.update(p.recorder.count('spikes', gather, p._record_filter))
except errors.NothingToWriteError:
pass
return spike_counts
def write_data(self, io, variables='all', gather=True, clear=False, annotations=None):
"""
Write recorded data to file, using one of the file formats supported by
Neo.
`io`:
a Neo IO instance
`variables`:
either a single variable name or a list of variable names.
Variables must have been previously recorded, otherwise an
Exception will be raised.
For parallel simulators, if `gather` is True, all data will be gathered
to the master node and a single output file created there. Otherwise, a
file will be written on each node, containing only data from the cells
simulated on that node.
If `clear` is True, recorded data will be deleted from the `Population`.
"""
if isinstance(io, basestring):
io = recording.get_io(io)
if gather is False and self._simulator.state.num_processes > 1:
io.filename += '.%d' % self._simulator.state.mpi_rank
logger.debug("Recorder is writing '%s' to file '%s' with gather=%s" % (
variables, io.filename, gather))
| data = self.get_data(variables, gather, clear, annotations) | 5,678 | lcc_e | python | null | 594c693ecfddc23819ec6a8d933d0930ccc6bb544441a19b |
|
# Bob build tool
# Copyright (C) 2016 TechniSat Digital GmbH
#
# SPDX-License-Identifier: GPL-3.0-or-later
from . import BOB_VERSION, BOB_INPUT_HASH, DEBUG
from .errors import ParseError, BobError
from .languages import getLanguage, ScriptLanguage, BashLanguage, PwshLanguage
from .pathspec import PackageSet
from .scm import CvsScm, GitScm, ImportScm, SvnScm, UrlScm, ScmOverride, \
auditFromDir, getScm, SYNTHETIC_SCM_PROPS
from .state import BobState
from .stringparser import checkGlobList, Env, DEFAULT_STRING_FUNS, IfExpression
from .tty import InfoOnce, Warn, WarnOnce, setColorMode
from .utils import asHexStr, joinScripts, compareVersion, binStat, \
updateDicRecursive, hashString, getPlatformTag, isWindows, getPlatformString
from itertools import chain
from os.path import expanduser
from string import Template
from textwrap import dedent
import copy
import hashlib
import fnmatch
import os, os.path
import pickle
import re
import schema
import sqlite3
import struct
import sys
try:
from yaml import load as yamlLoad, CSafeLoader as YamlSafeLoader
except ImportError:
from yaml import load as yamlLoad, SafeLoader as YamlSafeLoader
warnFilter = WarnOnce("The filter keyword is experimental and might change or vanish in the future.")
warnDepends = WarnOnce("The same package is named multiple times as dependency!",
help="Only the first such incident is reported. This behavior will be treated as an error in the future.")
warnDeprecatedPluginState = Warn("Plugin uses deprecated 'bob.input.PluginState' API!")
warnDeprecatedStringFn = Warn("Plugin uses deprecated 'stringFunctions' API!")
def overlappingPaths(p1, p2):
p1 = os.path.normcase(os.path.normpath(p1)).split(os.sep)
if p1 == ["."]: p1 = []
p2 = os.path.normcase(os.path.normpath(p2)).split(os.sep)
if p2 == ["."]: p2 = []
for i in range(min(len(p1), len(p2))):
if p1[i] != p2[i]: return False
return True
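# Build a reducer function for a single pattern: a leading '!' switches matching
# elements off, otherwise matches are switched on. Glob patterns are matched with
# fnmatch, plain strings are compared literally.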
def __maybeGlob(pred):
if pred.startswith("!"):
pred = pred[1:]
if any(i in pred for i in '*?[]'):
return lambda prev, elem: False if fnmatch.fnmatchcase(elem, pred) else prev
else:
return lambda prev, elem: False if elem == pred else prev
else:
if any(i in pred for i in '*?[]'):
return lambda prev, elem: True if fnmatch.fnmatchcase(elem, pred) else prev
else:
return lambda prev, elem: True if elem == pred else prev
def maybeGlob(pattern):
if isinstance(pattern, list):
return [ __maybeGlob(p) for p in pattern ]
else:
return None
class __uidGen:
def __init__(self):
self.cur = 0
def get(self):
self.cur += 1
return self.cur
uidGen = __uidGen().get
class DigestHasher:
def __init__(self):
self.__recipes = bytearray()
self.__host = bytearray()
def update(self, real):
"""Add bytes to recipe-internal part of digest."""
self.__recipes.extend(real)
def fingerprint(self, imag):
"""Add bytes of fingerprint to host part of digest."""
self.__host.extend(imag)
def digest(self):
"""Calculate final digest value.
If no host fingerprints were added only the recipe-internal digest is
emitted. Otherwise the fingerprint digest is appended. This keeps the
calculation backwards compatible (Bob <0.15).
"""
if self.__host:
return hashlib.sha1(self.__recipes).digest() + \
hashlib.sha1(self.__host).digest()
else:
return hashlib.sha1(self.__recipes).digest()
@staticmethod
def sliceRecipes(digest):
"""Extract recipe-internal digest part."""
return digest[:20]
@staticmethod
def sliceHost(digest):
"""Extract host fingerprint digest part (if any)."""
return digest[20:]
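# A minimal usage sketch of DigestHasher (illustration only, not used by Bob itself):
#
#   h = DigestHasher()
#   h.update(b"recipe-data")        # recipe-internal part
#   h.fingerprint(b"host-data")     # optional host-specific part
#   key = h.digest()                # 20 bytes, or 40 bytes if fingerprinted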
def fetchFingerprintScripts(recipe):
return {
ScriptLanguage.BASH : recipe.get("fingerprintScriptBash",
recipe.get("fingerprintScript")),
ScriptLanguage.PWSH : recipe.get("fingerprintScriptPwsh",
recipe.get("fingerprintScript")),
}
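# Fetch the setup/main scripts for every supported language, preferring the
# language-specific key (e.g. checkoutScriptBash) over the generic one (e.g. checkoutScript).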
def fetchScripts(recipe, prefix, resolveBash, resolvePwsh):
return {
ScriptLanguage.BASH : (
resolveBash(recipe.get(prefix + "SetupBash", recipe.get(prefix + "Setup")),
prefix + "Setup[Bash]"),
resolveBash(recipe.get(prefix + "ScriptBash", recipe.get(prefix + "Script")),
prefix + "Script[Bash]"),
),
ScriptLanguage.PWSH : (
resolvePwsh(recipe.get(prefix + "SetupPwsh", recipe.get(prefix + "Setup")),
prefix + "Setup[Pwsh]"),
resolvePwsh(recipe.get(prefix + "ScriptPwsh", recipe.get(prefix + "Script")),
prefix + "Script[Pwsh]"),
)
}
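# fetchScripts() yields, per language, a pair of (script, digestScript) tuples;
# mergeScripts() below joins the setup parts (index 0) and the main parts (index 1)
# of such fragments.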
def mergeScripts(fragments, glue):
"""Join all scripts of the recipe and its classes.
The result is a tuple with (setupScript, mainScript, digestScript)
"""
return (
joinScripts((f[0][0] for f in fragments), glue),
joinScripts((f[1][0] for f in fragments), glue),
joinScripts(
( joinScripts((f[0][1] for f in fragments), "\n"),
joinScripts((f[1][1] for f in fragments), "\n"),
), "\n")
)
class PluginProperty:
"""Base class for plugin property handlers.
A plugin should sub-class this class to parse custom properties in a
    recipe. For each recipe, an object of that class is then created. The
default constructor just stores the *present* and *value* parameters as
attributes in the object.
:param bool present: True if property is present in recipe
:param value: Unmodified value of property from recipe or None if not present.
"""
def __init__(self, present, value):
self.present = present
self.value = value
def inherit(self, cls):
"""Inherit from a class.
The default implementation will use the value from the class if the
property was not present. Otherwise the class value will be ignored.
"""
if not self.present:
self.present = cls.present
self.value = cls.value
def isPresent(self):
"""Return True if the property was present in the recipe."""
return self.present
def getValue(self):
"""Get (parsed) value of the property."""
return self.value
@staticmethod
def validate(data):
"""Validate type of property.
        Usually the plugin will reimplement this static method and return True
only if *data* has the expected type. The default implementation will
always return True.
:param data: Parsed property data from the recipe
:return: True if data has expected type, otherwise False.
"""
return True
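# A minimal sketch of a custom property handler (illustration only; the property
# name "license" and its string type are made up for this example):
#
#   class LicenseProperty(PluginProperty):
#       @staticmethod
#       def validate(data):
#           return isinstance(data, str)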
class PluginState:
"""Base class for plugin state trackers.
State trackers are used by plugins to compute the value of one or more
properties as the dependency tree of all recipes is traversed.
.. attention::
Objects of this class are tested for equivalence. The default
implementation compares all members of the involved objects. If custom
types are stored in the object you have to provide a suitable
``__eq__`` and ``__ne__`` implementation because Python falls back to
object identity which might not be correct. If these operators are not
working correctly then Bob may slow down considerably.
"""
def __eq__(self, other):
return vars(self) == vars(other)
def __ne__(self, other):
return vars(self) != vars(other)
def copy(self):
"""Return a copy of the object.
The default implementation uses copy.deepcopy() which should usually be
enough. If the plugin uses a sophisticated state tracker, especially
        when holding references to created packages, it might be useful to
provide a specialized implementation.
"""
return copy.deepcopy(self)
def onEnter(self, env, properties):
"""Begin creation of a package.
The state tracker is about to witness the creation of a package. The passed
environment, tools and (custom) properties are in their initial state that
was inherited from the parent recipe.
:param env: Complete environment
:type env: Mapping[str, str]
:param properties: All custom properties
:type properties: Mapping[str, :class:`bob.input.PluginProperty`]
"""
pass
def onUse(self, downstream):
"""Use provided state of downstream package.
This method is called if the user added the name of the state tracker
to the ``use`` clause in the recipe. A state tracker supporting this
notion should somehow pick up and merge the state of the downstream
package.
The default implementation does nothing.
:param bob.input.PluginState downstream: State of downstream package
"""
pass
def onFinish(self, env, properties):
"""Finish creation of a package.
The package was computed. The passed *env* and *properties* have their
final state after all downstream dependencies have been resolved.
:param env: Complete environment
:type env: Mapping[str, str]
:param properties: All custom properties
:type properties: Mapping[str, :class:`bob.input.PluginProperty`]
"""
pass
class PluginSetting:
"""Base class for plugin settings.
Plugins can be configured in the user configuration of a project. The
plugin must derive from this class, create an object with the default value
and assign it to 'settings' in the plugin manifest. The default
constructor will just store the passed value in the ``settings`` member.
:param settings: The default settings
"""
def __init__(self, settings):
self.settings = settings
def merge(self, other):
"""Merge other settings into current ones.
This method is called when other configuration files with a higher
precedence have been parsed. The settings in these files are first
validated by invoking the ``validate`` static method. Then this method
is called that should update the current object with the value of
*other*.
The default implementation implements the following policy:
* Dictionaries are merged recursively on a key-by-key basis
* Lists are appended to each other
        * Everything else in *other* replaces the current settings
It is assumed that the actual settings are stored in the ``settings``
member variable.
:param other: Other settings with higher precedence
"""
if isinstance(self.settings, dict) and isinstance(other, dict):
self.settings = updateDicRecursive(self.settings, other)
elif isinstance(self.settings, list) and isinstance(other, list):
self.settings = self.settings + other
else:
self.settings = other
def getSettings(self):
"""Getter for settings data."""
return self.settings
@staticmethod
def validate(data):
"""Validate type of settings.
        Usually the plugin will reimplement this method and return True only
if *data* has the expected type. The default implementation will always
return True.
:param data: Parsed settings data from user configuration
:return: True if data has expected type, otherwise False.
"""
return True
def pluginStateCompat(cls):
"""Small compat decorator to roughly support <0.15 plugins"""
_onEnter = cls.onEnter
_onFinish = cls.onFinish
def onEnter(self, env, properties):
_onEnter(self, env, {}, properties)
def onFinish(self, env, properties):
_onFinish(self, env, {}, properties, None)
# wrap overridden methods
if cls.onEnter is not PluginState.onEnter:
cls.onEnter = onEnter
if cls.onFinish is not PluginState.onFinish:
cls.onFinish = onFinish
def pluginStringFunCompat(oldFun):
def newFun(args, **kwargs):
return oldFun(args, tools={}, **kwargs)
return newFun
class BuiltinSetting(PluginSetting):
"""Tiny wrapper to define Bob built-in settings"""
def __init__(self, schema, updater, mangle = False):
self.__schema = schema
self.__updater = updater
self.__mangle = mangle
def merge(self, other):
self.__updater(self.__schema.validate(other) if self.__mangle else other)
def validate(self, data):
try:
self.__schema.validate(data)
return True
except schema.SchemaError:
return False
def Scm(spec, env, overrides, recipeSet):
# resolve with environment
spec = { k : ( env.substitute(v, "checkoutSCM::"+k) if isinstance(v, str) else v)
for (k, v) in spec.items() }
# apply overrides before creating scm instances. It's possible to switch the Scm type with an override..
matchedOverrides = []
for override in overrides:
matched, spec = override.mangle(spec, env)
if matched:
matchedOverrides.append(override)
# check schema again if any SCM override matched
if matchedOverrides:
try:
recipeSet.SCM_SCHEMA.validate({ k:v for k,v in spec.items()
if k not in SYNTHETIC_SCM_PROPS })
except schema.SchemaError as e:
raise ParseError("Error validating SCM after applying scmOverrides: {}".format(str(e)))
# apply scmDefaults
for k, v in recipeSet.scmDefaults().get(spec['scm'], {}).items():
spec.setdefault(k, v)
# create scm instance
return getScm(spec, matchedOverrides, recipeSet)
class CheckoutAssert:
__slots__ = ('__source', '__file', '__digestSHA1', '__start', '__end')
SCHEMA = schema.Schema({
'file' : str,
'digestSHA1' : str,
schema.Optional('start') : schema.And(int, lambda n: n >= 1),
schema.Optional('end') : schema.And(int, lambda n: n >= 1),
})
def __init__(self, spec):
self.__source = spec['__source']
self.__file = spec['file']
self.__digestSHA1 = spec['digestSHA1']
self.__start = spec.get('start', 1)
self.__end = spec.get('end', 0xffffffff)
def getProperties(self):
return {
'__source' : self.__source,
'file' : self.__file,
'digestSHA1' : self.__digestSHA1,
'start' : self.__start,
'end' : self.__end,
}
def getSource(self):
return self.__source
async def invoke(self, invoker):
h = hashlib.sha1()
i = 0
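        # Hash only the 1-based, inclusive line range [start, end] of the file and
        # compare the result against the expected SHA1 digest.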
try:
with open(invoker.joinPath(self.__file), "rb") as f:
for line in f:
i += 1
if i < self.__start: continue
if (i == self.__start) or (i <= self.__end): h.update(line)
if i > self.__end: break
d = h.digest().hex()
if d != self.__digestSHA1:
invoker.fail(self.__file, "digest did not match! expected:", self.__digestSHA1, "got:", d)
except OSError as e:
invoker.fail(str(e))
def asDigestScript(self):
return self.__file + " " + self.__digestSHA1 + " " + str(self.__start) + " " + str(self.__end)
class CoreRef:
"""Reference from one CoreStep/CorePackage to another one.
The destination must always be deeper or at the same level in the graph.
The names that are added to the path stack are given in stackAdd. Because
identical "core" sub-graphs can be visible to the user under different
"real" paths we only store the difference between source and destination
to reconstruct the real values on reference resolution.
The real difficulty with these references is the handling of the ambient
tools and the sandbox. Each package has a set of tools and a sandbox
defined as their input. While iterating of the dependencies new tools or a
new sandbox can be picked up, creating a "diff" to the input tools/sandbox
of the package. When later re-creating the real Package/Step classes these
diffs must be applied on refDeref() so that the reference destination gets
the correct ambient tools/sandbox again.
diffTools: A dict. If the value of a tool is "None" the tool is deleted. A
string will copy the tool from an existing "inputTools". Otherwise the
    value is expected to be another CoreRef that needs to be dereferenced too.
"""
__slots__ = ('__destination', '__stackAdd', '__diffTools', '__diffSandbox')
def __init__(self, destination, stackAdd=[], diffTools={}, diffSandbox=...):
self.__destination = destination
self.__stackAdd = stackAdd
self.__diffTools = diffTools
self.__diffSandbox = diffSandbox
def refGetDestination(self):
return self.__destination.refGetDestination()
def refGetStack(self):
return self.__stackAdd + self.__destination.refGetStack()
def refDeref(self, stack, inputTools, inputSandbox, pathFormatter, cache=None):
if cache is None: cache = {}
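        # Apply the recorded tool diff on top of the inherited tools: None removes a
        # tool, a string aliases an existing input tool, and a CoreRef is dereferenced
        # (and cached) to a concrete Tool.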
if self.__diffTools:
tools = inputTools.copy()
for (name, tool) in self.__diffTools.items():
if tool is None:
del tools[name]
elif isinstance(tool, str):
tools[name] = inputTools[tool]
else:
coreTool = cache.get(tool)
if coreTool is None:
cache[tool] = coreTool = tool.refDeref(stack, inputTools, inputSandbox, pathFormatter, cache)
tools[name] = coreTool
else:
tools = inputTools
if self.__diffSandbox is ...:
sandbox = inputSandbox
elif self.__diffSandbox is None:
sandbox = None
elif self.__diffSandbox in cache:
sandbox = cache[self.__diffSandbox]
else:
sandbox = self.__diffSandbox.refDeref(stack, inputTools, inputSandbox,
pathFormatter, cache)
cache[self.__diffSandbox] = sandbox
return self.__destination.refDeref(stack + self.__stackAdd, tools, sandbox, pathFormatter)
class CoreItem:
__slots__ = []
def refGetDestination(self):
return self
def refGetStack(self):
return []
def refDeref(self, stack, inputTools, inputSandbox, pathFormatter, cache=None):
raise NotImplementedError
class AbstractTool:
__slots__ = ("path", "libs", "netAccess", "environment",
"fingerprintScript", "fingerprintIf", "fingerprintVars")
def __init__(self, spec):
if isinstance(spec, str):
self.path = spec
self.libs = []
self.netAccess = False
self.environment = {}
self.fingerprintScript = { lang : "" for lang in ScriptLanguage }
self.fingerprintIf = False
self.fingerprintVars = set()
else:
self.path = spec['path']
self.libs = spec.get('libs', [])
self.netAccess = spec.get('netAccess', False)
self.environment = spec.get('environment', {})
self.fingerprintScript = fetchFingerprintScripts(spec)
self.fingerprintIf = spec.get("fingerprintIf")
self.fingerprintVars = set(spec.get("fingerprintVars", []))
def prepare(self, coreStepRef, env):
"""Create concrete tool for given step."""
path = env.substitute(self.path, "provideTools::path")
libs = [ env.substitute(l, "provideTools::libs") for l in self.libs ]
environment = { k : env.substitute(v, "provideTools::environment::"+k)
for k, v in self.environment.items() }
return CoreTool(coreStepRef, path, libs, self.netAccess, environment,
self.fingerprintScript, self.fingerprintIf,
self.fingerprintVars)
class CoreTool(CoreItem):
__slots__ = ("coreStep", "path", "libs", "netAccess", "environment",
"fingerprintScript", "fingerprintIf", "fingerprintVars", "resultId")
def __init__(self, coreStep, path, libs, netAccess, environment,
fingerprintScript, fingerprintIf, fingerprintVars):
self.coreStep = coreStep
self.path = path
self.libs = libs
self.netAccess = netAccess
self.environment = environment
self.fingerprintScript = fingerprintScript
self.fingerprintIf = fingerprintIf
self.fingerprintVars = fingerprintVars
# Calculate a "resultId" so that only identical tools match
h = hashlib.sha1()
h.update(coreStep.variantId)
h.update(struct.pack("<II", len(path), len(libs)))
h.update(path.encode("utf8"))
for l in libs:
h.update(struct.pack("<I", len(l)))
h.update(l.encode('utf8'))
h.update(struct.pack("<?I", netAccess, len(environment)))
for (key, val) in sorted(environment.items()):
h.update(struct.pack("<II", len(key), len(val)))
h.update((key+val).encode('utf8'))
for val in (fingerprintScript[lang] for lang in ScriptLanguage):
h.update(struct.pack("<I", len(val)))
h.update(val.encode('utf8'))
h.update(struct.pack("<I", len(fingerprintVars)))
for key in sorted(fingerprintVars):
h.update(key.encode('utf8'))
fingerprintIfStr = str(fingerprintIf)
h.update(struct.pack("<I", len(fingerprintIfStr)))
h.update(fingerprintIfStr.encode('utf8'))
self.resultId = h.digest()
def refDeref(self, stack, inputTools, inputSandbox, pathFormatter, cache=None):
step = self.coreStep.refDeref(stack, inputTools, inputSandbox, pathFormatter)
return Tool(step, self.path, self.libs, self.netAccess, self.environment,
self.fingerprintScript, self.fingerprintVars)
class Tool:
"""Representation of a tool.
A tool is made of the result of a package, a relative path into this result
and some optional relative library paths.
"""
__slots__ = ("step", "path", "libs", "netAccess", "environment",
"fingerprintScript", "fingerprintVars")
def __init__(self, step, path, libs, netAccess, environment, fingerprintScript,
fingerprintVars):
self.step = step
self.path = path
self.libs = libs
self.netAccess = netAccess
self.environment = environment
self.fingerprintScript = fingerprintScript
self.fingerprintVars = fingerprintVars
def __repr__(self):
return "Tool({}, {}, {})".format(repr(self.step), self.path, self.libs)
def __eq__(self, other):
return isinstance(other, Tool) and (self.step == other.step) and (self.path == other.path) and \
(self.libs == other.libs) and (self.netAccess == other.netAccess) and \
(self.environment == other.environment)
def getStep(self):
"""Return package step that produces the result holding the tool
binaries/scripts.
:return: :class:`bob.input.Step`
"""
return self.step
def getPath(self):
"""Get relative path into the result."""
return self.path
def getLibs(self):
"""Get list of relative library paths into the result.
:return: List[str]
"""
return self.libs
def getNetAccess(self):
"""Does tool require network access?
This reflects the `netAccess` tool property.
:return: bool
"""
return self.netAccess
def getEnvironment(self):
"""Get environment variables.
Returns the dictionary of environment variables that are defined by the
tool.
"""
return self.environment
class CoreSandbox(CoreItem):
__slots__ = ("coreStep", "enabled", "paths", "mounts", "environment",
"resultId")
def __init__(self, coreStep, env, enabled, spec):
recipeSet = coreStep.corePackage.recipe.getRecipeSet()
self.coreStep = coreStep
self.enabled = enabled
self.paths = recipeSet.getSandboxPaths() + spec['paths']
self.mounts = []
for mount in spec.get('mount', []):
m = (env.substitute(mount[0], "provideSandbox::mount-from"),
env.substitute(mount[1], "provideSandbox::mount-to"),
mount[2])
# silently drop empty mount lines
if (m[0] != "") and (m[1] != ""):
self.mounts.append(m)
self.mounts.extend(recipeSet.getSandboxMounts())
self.environment = {
            k : env.substitute(v, "provideSandbox::environment")
for (k, v) in spec.get('environment', {}).items()
}
# Calculate a "resultId" so that only identical sandboxes match
h = hashlib.sha1()
h.update(self.coreStep.variantId)
h.update(struct.pack("<I", len(self.paths)))
for p in self.paths:
h.update(struct.pack("<I", len(p)))
h.update(p.encode('utf8'))
h.update(struct.pack("<I", len(self.mounts)))
for (mntFrom, mntTo, mntOpts) in self.mounts:
h.update(struct.pack("<III", len(mntFrom), len(mntTo), len(mntOpts)))
h.update((mntFrom+mntTo+"".join(mntOpts)).encode('utf8'))
h.update(struct.pack("<I", len(self.environment)))
for (key, val) in sorted(self.environment.items()):
h.update(struct.pack("<II", len(key), len(val)))
h.update((key+val).encode('utf8'))
self.resultId = h.digest()
def __eq__(self, other):
return isinstance(other, CoreSandbox) and \
(self.coreStep.variantId == other.coreStep.variantId) and \
(self.enabled == other.enabled) and \
(self.paths == other.paths) and \
(self.mounts == other.mounts) and \
(self.environment == other.environment)
def refDeref(self, stack, inputTools, inputSandbox, pathFormatter, cache=None):
step = self.coreStep.refDeref(stack, inputTools, inputSandbox, pathFormatter)
return Sandbox(step, self)
class Sandbox:
"""Represents a sandbox that is used when executing a step."""
__slots__ = ("step", "coreSandbox")
def __init__(self, step, coreSandbox):
self.step = step
self.coreSandbox = coreSandbox
def __eq__(self, other):
return isinstance(other, Sandbox) and (self.coreSandbox == other.coreSandbox)
def getStep(self):
"""Get the package step that yields the content of the sandbox image."""
return self.step
def getPaths(self):
"""Return list of global search paths.
This is the base $PATH in the sandbox."""
return self.coreSandbox.paths
def getMounts(self):
"""Get custom mounts.
This returns a list of tuples where each tuple has the format
(hostPath, sandboxPath, options).
"""
return self.coreSandbox.mounts
def getEnvironment(self):
"""Get environment variables.
Returns the dictionary of environment variables that are defined by the
sandbox.
"""
return self.coreSandbox.environment
def isEnabled(self):
"""Return True if the sandbox is used in the current build configuration."""
return self.coreSandbox.enabled
class CoreStep(CoreItem):
__slots__ = ( "corePackage", "digestEnv", "env", "args",
"providedEnv", "providedTools", "providedDeps", "providedSandbox",
"variantId", "sbxVarId", "deterministic", "isValid" )
def __init__(self, corePackage, isValid, deterministic, digestEnv, env, args):
self.corePackage = corePackage
self.isValid = isValid
self.digestEnv = digestEnv.detach()
self.env = env.detach()
self.args = args
self.deterministic = deterministic and all(
arg.isDeterministic() for arg in self.getAllDepCoreSteps(True))
self.variantId = self.getDigest(lambda coreStep: coreStep.variantId)
self.providedEnv = {}
self.providedTools = {}
self.providedDeps = []
self.providedSandbox = None
def getPreRunCmds(self):
return []
def getJenkinsPreRunCmds(self):
return []
def getSetupScript(self):
raise NotImplementedError
def getMainScript(self):
raise NotImplementedError
def getPostRunCmds(self):
return []
def getDigestScript(self):
raise NotImplementedError
def getLabel(self):
raise NotImplementedError
def _getToolKeys(self):
"""Return relevant tool names for this CoreStep."""
raise NotImplementedError
def _getToolKeysWeak(self):
"""Return relevant weak tool names for this CoreStep."""
raise NotImplementedError
def isDeterministic(self):
return self.deterministic
def isCheckoutStep(self):
return False
def isBuildStep(self):
return False
def isPackageStep(self):
return False
def getTools(self):
if self.isValid:
toolKeys = self._getToolKeys()
return { name : tool for name,tool in self.corePackage.tools.items()
if name in toolKeys }
else:
return {}
def getSandbox(self, forceSandbox=False):
# Forcing the sandbox is only allowed if sandboxInvariant policy is not
# set or disabled.
forceSandbox = forceSandbox and \
not self.corePackage.recipe.getRecipeSet().sandboxInvariant
sandbox = self.corePackage.sandbox
if sandbox and (sandbox.enabled or forceSandbox) and self.isValid:
return sandbox
else:
return None
def getAllDepCoreSteps(self, forceSandbox=False):
sandbox = self.getSandbox(forceSandbox)
return [ a.refGetDestination() for a in self.args ] + \
[ d.coreStep for n,d in sorted(self.getTools().items()) ] + (
[ sandbox.coreStep] if sandbox else [])
def getDigest(self, calculate, forceSandbox=False):
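# The digest is assembled from: an optional fingerprint part, the sandbox
# (digest of its step plus its search paths) or 20 zero bytes, the digest
# script, every used tool (digest of its step, path and libs), the
# digest-relevant environment and finally the digests of all valid
# arguments. Variable-length fields are length-prefixed (little-endian
# 32-bit) so adjacent fields cannot be confused.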
h = DigestHasher()
if self.isFingerprinted() and self.getSandbox() \
and not self.corePackage.recipe.getRecipeSet().sandboxFingerprints:
h.fingerprint(DigestHasher.sliceRecipes(calculate(self.getSandbox().coreStep)))
sandbox = not self.corePackage.recipe.getRecipeSet().sandboxInvariant and \
self.getSandbox(forceSandbox)
if sandbox:
h.update(DigestHasher.sliceRecipes(calculate(sandbox.coreStep)))
h.update(struct.pack("<I", len(sandbox.paths)))
for p in sandbox.paths:
h.update(struct.pack("<I", len(p)))
h.update(p.encode('utf8'))
else:
h.update(b'\x00' * 20)
script = self.getDigestScript()
if script:
h.update(struct.pack("<I", len(script)))
h.update(script.encode("utf8"))
else:
h.update(b'\x00\x00\x00\x00')
tools = self.getTools()
h.update(struct.pack("<I", len(tools)))
for (name, tool) in sorted(tools.items(), key=lambda t: t[0]):
h.update(DigestHasher.sliceRecipes(calculate(tool.coreStep)))
h.update(struct.pack("<II", len(tool.path), len(tool.libs)))
h.update(tool.path.encode("utf8"))
for l in tool.libs:
h.update(struct.pack("<I", len(l)))
h.update(l.encode('utf8'))
h.update(struct.pack("<I", len(self.digestEnv)))
for (key, val) in sorted(self.digestEnv.items()):
h.update(struct.pack("<II", len(key), len(val)))
h.update((key+val).encode('utf8'))
args = [ arg for arg in (a.refGetDestination() for a in self.args) if arg.isValid ]
h.update(struct.pack("<I", len(args)))
for arg in args:
arg = calculate(arg)
h.update(DigestHasher.sliceRecipes(arg))
h.fingerprint(DigestHasher.sliceHost(arg))
return h.digest()
def getResultId(self):
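# The result-id starts from the variant-id and additionally hashes
# everything that matters for package identity but not for the build
# result: invalid dependencies, the used sandbox (if the sandboxInvariant
# policy is active), weak tools and all provided items.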
h = hashlib.sha1()
h.update(self.variantId)
# Include invalid dependencies. They are needed for traversing dummy
# packages without a buildScript in path queries. Valid dependencies
# are already included in the variantId.
args = [ arg for arg in (a.refGetDestination() for a in self.args) if not arg.isValid ]
h.update(struct.pack("<I", len(args)))
for arg in args:
h.update(arg.getResultId())
# Include used sandbox in case sandboxInvariant policy is active.
# Prevents merging of identical packages that are defined under
# different sandboxes.
sandbox = self.corePackage.recipe.getRecipeSet().sandboxInvariant and \
self.getSandbox()
if sandbox:
h.update(sandbox.coreStep.variantId)
h.update(struct.pack("<I", len(sandbox.paths)))
for p in sandbox.paths:
h.update(struct.pack("<I", len(p)))
h.update(p.encode('utf8'))
# Include weak tools for the same reason as above.
weakTools = self._getToolKeysWeak()
for (name, tool) in sorted(self.getTools().items(), key=lambda t: t[0]):
if name in weakTools:
h.update(tool.coreStep.variantId)
h.update(struct.pack("<II", len(tool.path), len(tool.libs)))
h.update(tool.path.encode("utf8"))
for l in tool.libs:
h.update(struct.pack("<I", len(l)))
h.update(l.encode('utf8'))
# providedEnv
h.update(struct.pack("<I", len(self.providedEnv)))
for (key, val) in sorted(self.providedEnv.items()):
h.update(struct.pack("<II", len(key), len(val)))
h.update((key+val).encode('utf8'))
# providedTools
providedTools = self.providedTools
h.update(struct.pack("<I", len(providedTools)))
for (name, tool) in sorted(providedTools.items()):
h.update(struct.pack("<I", len(name)))
h.update(name.encode("utf8"))
h.update(tool.resultId)
# provideDeps
providedDeps = self.providedDeps
h.update(struct.pack("<I", len(providedDeps)))
for dep in providedDeps:
h.update(dep.refGetDestination().variantId)
# sandbox
providedSandbox = self.providedSandbox
if providedSandbox:
h.update(providedSandbox.resultId)
else:
h.update(b'\x00' * 20)
return h.digest()
def getSandboxVariantId(self):
# This is a special variant to calculate the variant-id as if the
# sandbox was enabled. This is used for live build-ids and on the
# jenkins where the build-id of the sandbox must always be calculated.
# But this is all obsolete if the sandboxInvariant policy is enabled.
try:
ret = self.sbxVarId
except AttributeError:
ret = self.sbxVarId = self.getDigest(
lambda step: step.getSandboxVariantId(),
True) if not self.corePackage.recipe.getRecipeSet().sandboxInvariant \
else self.variantId
return ret
@property
def fingerprintMask(self):
raise NotImplementedError
def isFingerprinted(self):
return self.fingerprintMask != 0
@property
def jobServer(self):
return self.corePackage.recipe.jobServer
class Step:
"""Represents the smallest unit of execution of a package.
A step is what gets actually executed when building packages.
Steps can be compared and sorted. This is done based on the Variant-Id of
the step. See :meth:`bob.input.Step.getVariantId` for details.
"""
def __init__(self, coreStep, package, pathFormatter):
self._coreStep = coreStep
self.__package = package
self.__pathFormatter = pathFormatter
def __repr__(self):
return "Step({}, {}, {})".format(self.getLabel(), "/".join(self.getPackage().getStack()), asHexStr(self.getVariantId()))
def __hash__(self):
return hash(self._coreStep.variantId)
def __lt__(self, other):
return self._coreStep.variantId < other._coreStep.variantId
def __le__(self, other):
return self._coreStep.variantId <= other._coreStep.variantId
def __eq__(self, other):
return self._coreStep.variantId == other._coreStep.variantId
def __ne__(self, other):
return self._coreStep.variantId != other._coreStep.variantId
def __gt__(self, other):
return self._coreStep.variantId > other._coreStep.variantId
def __ge__(self, other):
return self._coreStep.variantId >= other._coreStep.variantId
def getPreRunCmds(self):
return self._coreStep.getPreRunCmds()
def getJenkinsPreRunCmds(self):
return self._coreStep.getJenkinsPreRunCmds()
def getScript(self):
"""Return a single big script of the whole step.
Besides considerations of special backends (such as Jenkins) this
script is what should be executed to build this step."""
return joinScripts([self.getSetupScript(), self.getMainScript()],
self.getPackage().getRecipe().scriptLanguage.glue) or ""
def getJenkinsScript(self):
"""Return the relevant parts as shell script that have no Jenkins plugin.
Deprecated. Returns the same script as bob.input.Step.getScript().
"""
import warnings
warnings.warn("getJenkinsScript is deprecated", DeprecationWarning)
return self.getScript()
def getSetupScript(self):
return self._coreStep.getSetupScript()
def getMainScript(self):
return self._coreStep.getMainScript()
def getPostRunCmds(self):
return self._coreStep.getPostRunCmds()
def getDigestScript(self):
"""Return a long term stable script.
The digest script will not be executed but is the basis to calculate if
the step has changed. In case of the checkout step the involved SCMs will
return a stable representation of _what_ is checked out and not the real
script of _how_ this is done.
"""
return self._coreStep.getDigestScript()
def isDeterministic(self):
"""Return whether the step is deterministic.
Checkout steps that have a script are considered indeterministic unless
the recipe declares it otherwise (checkoutDeterministic). Then the SCMs
are checked if they all consider themselves deterministic. Build and
package steps are always deterministic.
The determinism is defined recursively for all arguments, tools and the
sandbox of the step too. That is, the step is only deterministic if all
its dependencies and the step itself are deterministic.
"""
return self._coreStep.isDeterministic()
def isValid(self):
"""Returns True if this step is valid, False otherwise."""
return self._coreStep.isValid
def isCheckoutStep(self):
"""Return True if this is a checkout step."""
return self._coreStep.isCheckoutStep()
def isBuildStep(self):
"""Return True if this is a build step."""
return self._coreStep.isBuildStep()
def isPackageStep(self):
"""Return True if this is a package step."""
return self._coreStep.isPackageStep()
def getPackage(self):
"""Get Package object that is the parent of this Step."""
return self.__package
def getDigest(self, calculate, forceSandbox=False, hasher=DigestHasher,
fingerprint=None, platform=b'', relaxTools=False):
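# Mirrors CoreStep.getDigest() but operates on the dereferenced
# Step/Tool/Sandbox objects and additionally mixes in the platform tag,
# an externally supplied fingerprint and, with relaxTools, hashes weak
# tools only by their name instead of their content.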
h = hasher()
h.update(platform)
if self._coreStep.isFingerprinted() and self.getSandbox() \
and not self.__package.getRecipe().getRecipeSet().sandboxFingerprints:
h.fingerprint(hasher.sliceRecipes(calculate(self.getSandbox().getStep())))
elif fingerprint:
h.fingerprint(fingerprint)
sandbox = not self.__package.getRecipe().getRecipeSet().sandboxInvariant and \
self.getSandbox(forceSandbox)
if sandbox:
h.update(hasher.sliceRecipes(calculate(sandbox.getStep())))
h.update(struct.pack("<I", len(sandbox.getPaths())))
for p in sandbox.getPaths():
h.update(struct.pack("<I", len(p)))
h.update(p.encode('utf8'))
else:
h.update(b'\x00' * 20)
script = self.getDigestScript()
if script:
h.update(struct.pack("<I", len(script)))
h.update(script.encode("utf8"))
else:
h.update(b'\x00\x00\x00\x00')
tools = self.getTools()
weakTools = self._coreStep._getToolKeysWeak() if relaxTools else []
h.update(struct.pack("<I", len(tools)))
for (name, tool) in sorted(tools.items(), key=lambda t: t[0]):
if name in weakTools:
h.update(name.encode('utf8'))
else:
h.update(hasher.sliceRecipes(calculate(tool.step)))
h.update(struct.pack("<II", len(tool.path), len(tool.libs)))
h.update(tool.path.encode("utf8"))
for l in tool.libs:
h.update(struct.pack("<I", len(l)))
h.update(l.encode('utf8'))
h.update(struct.pack("<I", len(self._coreStep.digestEnv)))
for (key, val) in sorted(self._coreStep.digestEnv.items()):
h.update(struct.pack("<II", len(key), len(val)))
h.update((key+val).encode('utf8'))
args = [ calculate(a) for a in self.getArguments() if a.isValid() ]
h.update(struct.pack("<I", len(args)))
for arg in args:
h.update(hasher.sliceRecipes(arg))
h.fingerprint(hasher.sliceHost(arg))
return h.digest()
async def getDigestCoro(self, calculate, forceSandbox=False, hasher=DigestHasher,
fingerprint=None, platform=b'', relaxTools=False):
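# Async variant of getDigest(): the 'calculate' callback is awaited and
# takes a list of steps, returning their digests in one go.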
h = hasher()
h.update(platform)
if self._coreStep.isFingerprinted() and self.getSandbox() \
and not self.__package.getRecipe().getRecipeSet().sandboxFingerprints:
[d] = await calculate([self.getSandbox().getStep()])
h.fingerprint(hasher.sliceRecipes(d))
elif fingerprint:
h.fingerprint(fingerprint)
sandbox = not self.__package.getRecipe().getRecipeSet().sandboxInvariant and \
self.getSandbox(forceSandbox)
if sandbox:
[d] = await calculate([sandbox.getStep()])
h.update(hasher.sliceRecipes(d))
h.update(struct.pack("<I", len(sandbox.getPaths())))
for p in sandbox.getPaths():
h.update(struct.pack("<I", len(p)))
h.update(p.encode('utf8'))
else:
h.update(b'\x00' * 20)
script = self.getDigestScript()
if script:
h.update(struct.pack("<I", len(script)))
h.update(script.encode("utf8"))
else:
h.update(b'\x00\x00\x00\x00')
tools = self.getTools()
weakTools = self._coreStep._getToolKeysWeak() if relaxTools else []
h.update(struct.pack("<I", len(tools)))
tools = sorted(tools.items(), key=lambda t: t[0])
toolsDigests = await calculate([ tool.step for name,tool in tools ])
for ((name, tool), d) in zip(tools, toolsDigests):
if name in weakTools:
h.update(name.encode('utf8'))
else:
h.update(hasher.sliceRecipes(d))
h.update(struct.pack("<II", len(tool.path), len(tool.libs)))
h.update(tool.path.encode("utf8"))
for l in tool.libs:
h.update(struct.pack("<I", len(l)))
h.update(l.encode('utf8'))
h.update(struct.pack("<I", len(self._coreStep.digestEnv)))
for (key, val) in sorted(self._coreStep.digestEnv.items()):
h.update(struct.pack("<II", len(key), len(val)))
h.update((key+val).encode('utf8'))
args = [ a for a in self.getArguments() if a.isValid() ]
argsDigests = await calculate(args)
h.update(struct.pack("<I", len(args)))
for d in argsDigests:
h.update(hasher.sliceRecipes(d))
h.fingerprint(hasher.sliceHost(d))
return h.digest()
def getVariantId(self):
"""Return Variant-Id of this Step.
The Variant-Id is used to distinguish different packages or multiple
variants of a package. Each Variant-Id need only be built once but
successive builds might yield different results (e.g. when building
from branches)."""
return self._coreStep.variantId
def _getSandboxVariantId(self):
return self._coreStep.getSandboxVariantId()
def getSandbox(self, forceSandbox=False):
"""Return Sandbox used in this Step.
Returns a Sandbox object or None if this Step is built without one.
"""
# Forcing the sandbox is only allowed if sandboxInvariant policy is not
# set or disabled.
forceSandbox = forceSandbox and \
not self.__package.getRecipe().getRecipeSet().sandboxInvariant
sandbox = self.__package._getSandboxRaw()
if sandbox and (sandbox.isEnabled() or forceSandbox) and self._coreStep.isValid:
return sandbox
else:
return None
def getLabel(self):
"""Return path label for step.
This is currently defined as "src", "build" and "dist" for the
respective steps.
"""
return self._coreStep.getLabel()
def getExecPath(self, referrer=None):
"""Return the execution path of the step.
The execution path is where the step is actually run. It may be distinct
from the workspace path if the build is performed in a sandbox. The
``referrer`` is an optional parameter that represents a step that refers
to this step while building.
"""
if self.isValid():
if (referrer or self).getSandbox() is None:
return self.getStoragePath()
else:
return os.path.join("/bob", asHexStr(self.getVariantId()), "workspace")
else:
return "/invalid/exec/path/of/{}".format(self.__package.getName())
def getStoragePath(self):
"""Return the storage path of the step.
The storage path is where the files of the step are stored. For
checkout and build steps this is always the workspace path. But package
steps can be shared globally and thus the directory may lie outside of
the project directory. The storage path may also change between
invocations if the shared location changes.
"""
if self.isPackageStep() and self.isShared():
return self.__pathFormatter(self, 'storage', self.__package._getStates())
else:
return self.getWorkspacePath()
def getWorkspacePath(self):
"""Return the workspace path of the step.
The workspace path represents the location of the step in the user's
workspace. When building in a sandbox this path is not passed to the
script but the one from getExecPath() instead.
"""
if self.isValid():
return self.__pathFormatter(self, 'workspace', self.__package._getStates())
else:
return "/invalid/workspace/path/of/{}".format(self.__package.getName())
def getPaths(self):
"""Get sorted list of execution paths to used tools.
The returned list is intended to be passed as PATH environment variable.
The paths are sorted by name.
"""
return sorted([ os.path.join(tool.step.getExecPath(self), tool.path)
for tool in self.getTools().values() ])
def getLibraryPaths(self):
"""Get sorted list of library paths of used tools.
The returned list is intended to be passed as LD_LIBRARY_PATH environment
variable. The paths are first sorted by tool name. The order of paths of
a single tool is kept.
"""
paths = []
for (name, tool) in sorted(self.getTools().items()):
paths.extend([ os.path.join(tool.step.getExecPath(self), l) for l in tool.libs ])
return paths
def getTools(self):
"""Get dictionary of tools.
The dict maps the tool name to a :class:`bob.input.Tool`.
"""
if self._coreStep.isValid:
toolKeys = self._coreStep._getToolKeys()
return { name : tool for name, tool in self.__package._getAllTools().items()
if name in toolKeys }
else:
return {}
def getArguments(self):
"""Get list of all inputs for this Step.
The arguments are passed as absolute paths to the script starting from $1.
"""
p = self.__package
refCache = {}
return [ a.refDeref(p.getStack(), p._getInputTools(), p._getInputSandboxRaw(),
self.__pathFormatter, refCache)
for a in self._coreStep.args ]
def getAllDepSteps(self, forceSandbox=False):
"""Get all dependent steps of this Step.
This includes the direct input to the Step as well as indirect inputs
such as the used tools or the sandbox.
"""
sandbox = self.getSandbox(forceSandbox)
return self.getArguments() + [ d.step for n,d in sorted(self.getTools().items()) ] + (
[sandbox.getStep()] if sandbox else [])
def getEnv(self):
"""Return dict of environment variables."""
return self._coreStep.env
def doesProvideTools(self):
"""Return True if this step provides at least one tool."""
return bool(self._coreStep.providedTools)
def isShared(self):
"""Returns True if the result of the Step should be shared globally.
The exact behaviour of a shared step/package depends on the build
backend. In general a shared package means that the result is put into
some shared location where it is likely that the same result is needed
again.
"""
return False
def isRelocatable(self):
"""Returns True if the step is relocatable."""
return False
def jobServer(self):
"""Returns True if the jobserver should be used to schedule
builds for this step."""
return self._coreStep.jobServer()
def _getProvidedDeps(self):
p = self.__package
refCache = {}
return [ a.refDeref(p.getStack(), p._getInputTools(), p._getInputSandboxRaw(),
self.__pathFormatter, refCache)
for a in self._coreStep.providedDeps ]
def _isFingerprinted(self):
return self._coreStep.isFingerprinted()
def _getFingerprintScript(self):
"""Generate final fingerprint script.
The used fingerprint scripts of the tools and the recipe/classes are
finally stitched together based on the mask that was calculated in
Recipe.resolveClasses(). For each possible entry there are two bits in
the mask: bit 0 is set if the script is taken unconditionally and bit 1
is set if the script is taken if not empty.
"""
if not self._coreStep.isFingerprinted():
return ""
recipe = self.__package.getRecipe()
mask = self._coreStep.fingerprintMask
tools = self.__package.getPackageStep().getTools()
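# The iteration order must match the order in which the mask bits were
# assigned: first the fingerprint scripts of all package tools (sorted
# by name), then the scripts inherited from classes and the recipe.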
scriptsAndVars = chain(
((({}, []) if t is None else (t.fingerprintScript, t.fingerprintVars))
for t in (tools.get(k) for k in sorted(recipe.toolDepPackage))),
zip(recipe.fingerprintScriptList, recipe.fingerprintVarsList))
ret = []
varSet = set()
for s,v in scriptsAndVars:
s = s.get(recipe.scriptLanguage.index)
if (mask & 1) or ((mask & 2) and s):
ret.append(s)
varSet.update(v)
mask >>= 2
env = self.getEnv()
if recipe.getRecipeSet().getPolicy('fingerprintVars'):
env = { k : v for k,v in env.items() if k in varSet }
return recipe.scriptLanguage.mangleFingerprints(ret, env)
class CoreCheckoutStep(CoreStep):
__slots__ = ( "scmList" )
def __init__(self, corePackage, checkout=None, checkoutSCMs=[],
fullEnv=Env(), digestEnv=Env(), env=Env(), args=[]):
if checkout:
recipeSet = corePackage.recipe.getRecipeSet()
overrides = recipeSet.scmOverrides()
self.scmList = [ Scm(scm, fullEnv, overrides, recipeSet)
for scm in checkoutSCMs
if fullEnv.evaluate(scm.get("if"), "checkoutSCM") ]
isValid = (checkout[1] is not None) or bool(self.scmList)
# Validate that SCM paths do not overlap
knownPaths = []
for s in self.scmList:
p = s.getDirectory()
if os.path.isabs(p):
raise ParseError("SCM paths must be relative! Offending path: " + p)
for known in knownPaths:
if overlappingPaths(known, p):
raise ParseError("SCM paths '{}' and '{}' overlap."
.format(known, p))
knownPaths.append(p)
else:
isValid = False
self.scmList = []
deterministic = corePackage.recipe.checkoutDeterministic
super().__init__(corePackage, isValid, deterministic, digestEnv, env, args)
def _getToolKeys(self):
return self.corePackage.recipe.toolDepCheckout
def _getToolKeysWeak(self):
return self.corePackage.recipe.toolDepCheckoutWeak
def refDeref(self, stack, inputTools, inputSandbox, pathFormatter, cache=None):
package = self.corePackage.refDeref(stack, inputTools, inputSandbox, pathFormatter)
ret = CheckoutStep(self, package, pathFormatter)
package._setCheckoutStep(ret)
return ret
def getLabel(self):
return "src"
def isDeterministic(self):
return super().isDeterministic() and all(s.isDeterministic() for s in self.scmList)
def hasLiveBuildId(self):
return super().isDeterministic() and all(s.hasLiveBuildId() for s in self.scmList)
def isCheckoutStep(self):
return True
def getPreRunCmds(self):
return [s.getProperties(False) for s in self.scmList]
def getJenkinsPreRunCmds(self):
return [ s.getProperties(True) for s in self.scmList if not s.hasJenkinsPlugin() ]
def getSetupScript(self):
return self.corePackage.recipe.checkoutSetupScript
def getMainScript(self):
return self.corePackage.recipe.checkoutMainScript
def getPostRunCmds(self):
return [s.getProperties() for s in self.corePackage.recipe.checkoutAsserts]
def getDigestScript(self):
if self.isValid:
recipe = self.corePackage.recipe
return "\n".join([s.asDigestScript() for s in self.scmList]
+ [recipe.checkoutDigestScript]
+ [s.asDigestScript() for s in recipe.checkoutAsserts])
else:
return None
@property
def fingerprintMask(self):
return 0
class CheckoutStep(Step):
def getJenkinsXml(self, credentials, options):
return [ s.asJenkins(self.getWorkspacePath(), credentials, options)
for s in self._coreStep.scmList if s.hasJenkinsPlugin() ]
def getScmList(self):
return self._coreStep.scmList
def getScmDirectories(self):
dirs = {}
for s in self._coreStep.scmList:
dirs[s.getDirectory()] = (hashString(s.asDigestScript()), s.getProperties(False))
return dirs
def hasLiveBuildId(self):
"""Check if live build-ids are supported.
This must be supported by all SCMs. Additionally the checkout script
must be deterministic.
"""
return self._coreStep.hasLiveBuildId()
async def predictLiveBuildId(self):
"""Query server to predict live build-id.
Returns the live-build-id or None if an SCM query failed.
"""
if not self.hasLiveBuildId():
return None
h = hashlib.sha1()
h.update(getPlatformTag())
h.update(self._getSandboxVariantId())
for s in self._coreStep.scmList:
liveBId = await s.predictLiveBuildId(self)
if liveBId is None: return None
h.update(liveBId)
return h.digest()
def calcLiveBuildId(self):
"""Calculate live build-id from workspace."""
if not self.hasLiveBuildId():
return None
workspacePath = self.getWorkspacePath()
h = hashlib.sha1()
h.update(getPlatformTag())
h.update(self._getSandboxVariantId())
for s in self._coreStep.scmList:
liveBId = s.calcLiveBuildId(workspacePath)
if liveBId is None: return None
h.update(liveBId)
return h.digest()
def getLiveBuildIdSpec(self):
"""Generate spec lines for bob-hash-engine.
May return None if an SCM does not support live-build-ids on Jenkins.
"""
if not self.hasLiveBuildId():
return None
workspacePath = self.getWorkspacePath()
lines = [ "{sha1", "p", "=" + asHexStr(self._getSandboxVariantId()) ]
for s in self._coreStep.scmList:
liveBIdSpec = s.getLiveBuildIdSpec(workspacePath)
if liveBIdSpec is None: return None
lines.append(liveBIdSpec)
lines.append("}")
return "\n".join(lines)
def hasNetAccess(self):
return True
class CoreBuildStep(CoreStep):
__slots__ = []
def __init__(self, corePackage, script=(None, None, None), digestEnv=Env(), env=Env(), args=[]):
isValid = script[1] is not None
super().__init__(corePackage, isValid, True, digestEnv, env, args)
def _getToolKeys(self):
return self.corePackage.recipe.toolDepBuild
def _getToolKeysWeak(self):
return self.corePackage.recipe.toolDepBuildWeak
def refDeref(self, stack, inputTools, inputSandbox, pathFormatter, cache=None):
package = self.corePackage.refDeref(stack, inputTools, inputSandbox, pathFormatter)
ret = BuildStep(self, package, pathFormatter)
package._setBuildStep(ret)
return ret
def getLabel(self):
return "build"
def isBuildStep(self):
return True
def getSetupScript(self):
return self.corePackage.recipe.buildSetupScript
def getMainScript(self):
return self.corePackage.recipe.buildMainScript
def getDigestScript(self):
return self.corePackage.recipe.buildDigestScript
@property
def fingerprintMask(self):
# Remove bits of all tools that are not used in buildStep
ret = self.corePackage.fingerprintMask
i = 3
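# Each entry in the mask occupies two bits and the tools come first,
# sorted by name (see _getFingerprintScript). Start with the bit pair of
# the first tool (0b11) and shift by two for every package tool.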
ourToolKeys = self.corePackage.recipe.toolDepBuild
packageToolKeys = self.corePackage.recipe.toolDepPackage
for t in sorted(packageToolKeys):
if t not in ourToolKeys:
ret &= ~i
i <<= 2
return ret
class BuildStep(Step):
def hasNetAccess(self):
return self.getPackage().getRecipe()._getBuildNetAccess() or any(
t.getNetAccess() for t in self.getTools().values())
class CorePackageStep(CoreStep):
__slots__ = []
def __init__(self, corePackage, script=(None, None, None), digestEnv=Env(), env=Env(), args=[]):
isValid = script[1] is not None
super().__init__(corePackage, isValid, True, digestEnv, env, args)
def _getToolKeys(self):
return self.corePackage.recipe.toolDepPackage
def _getToolKeysWeak(self):
return self.corePackage.recipe.toolDepPackageWeak
def refDeref(self, stack, inputTools, inputSandbox, pathFormatter, cache=None):
package = self.corePackage.refDeref(stack, inputTools, inputSandbox, pathFormatter)
ret = PackageStep(self, package, pathFormatter)
package._setPackageStep(ret)
return ret
def getLabel(self):
return "dist"
def isPackageStep(self):
return True
def getSetupScript(self):
return self.corePackage.recipe.packageSetupScript
def getMainScript(self):
return self.corePackage.recipe.packageMainScript
def getDigestScript(self):
return self.corePackage.recipe.packageDigestScript
@property
def fingerprintMask(self):
return self.corePackage.fingerprintMask
class PackageStep(Step):
def isShared(self):
"""Determine if the PackageStep be shared.
Requires the recipe to be marked as shared and the result must be
position independent.
"""
return self.getPackage().getRecipe().isShared() and self.isRelocatable()
def isRelocatable(self):
"""Returns True if the package step is relocatable."""
return self.getPackage().isRelocatable()
def hasNetAccess(self):
return self.getPackage().getRecipe()._getPackageNetAccess() or any(
t.getNetAccess() for t in self.getTools().values())
class CorePackageInternal(CoreItem):
__slots__ = []
def refDeref(self, stack, inputTools, inputSandbox, pathFormatter, cache=None):
return (inputTools, inputSandbox)
corePackageInternal = CorePackageInternal()
class CorePackage:
__slots__ = ("recipe", "internalRef", "directDepSteps", "indirectDepSteps",
"states", "tools", "sandbox", "checkoutStep", "buildStep", "packageStep",
"pkgId", "fingerprintMask")
def __init__(self, recipe, tools, diffTools, sandbox, diffSandbox,
directDepSteps, indirectDepSteps, states, pkgId, fingerprintMask):
self.recipe = recipe
self.tools = tools
self.sandbox = sandbox
self.internalRef = CoreRef(corePackageInternal, [], diffTools, diffSandbox)
self.directDepSteps = directDepSteps
self.indirectDepSteps = indirectDepSteps
self.states = states
self.pkgId = pkgId
self.fingerprintMask = fingerprintMask
def refDeref(self, stack, inputTools, inputSandbox, pathFormatter):
tools, sandbox = self.internalRef.refDeref(stack, inputTools, inputSandbox, pathFormatter)
return Package(self, stack, pathFormatter, inputTools, tools, inputSandbox, sandbox)
def createCoreCheckoutStep(self, checkout, checkoutSCMs, fullEnv, digestEnv, env, args):
ret = self.checkoutStep = CoreCheckoutStep(self, checkout, checkoutSCMs, fullEnv, digestEnv, env, args)
return ret
def createInvalidCoreCheckoutStep(self):
ret = self.checkoutStep = CoreCheckoutStep(self)
return ret
def createCoreBuildStep(self, script, digestEnv, env, args):
ret = self.buildStep = CoreBuildStep(self, script, digestEnv, env, args)
return ret
def createInvalidCoreBuildStep(self, args):
ret = self.buildStep = CoreBuildStep(self, args=args)
return ret
def createCorePackageStep(self, script, digestEnv, env, args):
ret = self.packageStep = CorePackageStep(self, script, digestEnv, env, args)
return ret
def getCorePackageStep(self):
return self.packageStep
def getName(self):
"""Name of the package"""
return self.recipe.getPackageName()
@property
def jobServer(self):
return self.recipe.jobServer()
class Package(object):
"""Representation of a package that was created from a recipe.
Usually multiple packages will be created from a single recipe. This is
either due to multiple upstream recipes or different variants of the same
package. This does not preclude the possibility that multiple Package
objects describe exactly the same package (read: same Variant-Id). It is
the responsibility of the build backend to detect this and build only one
package.
"""
def __init__(self, corePackage, stack, pathFormatter, inputTools, tools, inputSandbox, sandbox):
self.__corePackage = corePackage
self.__stack = stack
self.__pathFormatter = pathFormatter
self.__inputTools = inputTools
self.__tools = tools
self.__inputSandbox = inputSandbox
self.__sandbox = sandbox
def __eq__(self, other):
return isinstance(other, Package) and (self.__stack == other.__stack)
def _getId(self):
"""The package-Id is uniquely representing every package variant.
On the package level there might be more dependencies than on the step
level. Meta variables are usually unused and also do not contribute to
the variant-id. The package-id still guarantees to not collide in these
cases. OTOH there can be identical packages with different ids, though
it should be an unusual case.
"""
return self.__corePackage.pkgId
def _getInputTools(self):
return self.__inputTools
def _getAllTools(self):
return self.__tools
def _getInputSandboxRaw(self):
return self.__inputSandbox
def _getSandboxRaw(self):
return self.__sandbox
def getName(self):
"""Name of the package"""
return self.getRecipe().getPackageName()
def getMetaEnv(self):
"""meta variables of package"""
return self.getRecipe().getMetaEnv()
def getStack(self):
"""Returns the recipe processing stack leading to this package.
The method returns a list of package names. The first entry is a root
recipe and the last entry is this package."""
return self.__stack
def getRecipe(self):
"""Return Recipe object that was the template for this package."""
return self.__corePackage.recipe
def getDirectDepSteps(self):
"""Return list to the package steps of the direct dependencies.
Direct dependencies are the ones that are named explicitly in the
``depends`` section of the recipe. The order of the items is
preserved from the recipe.
"""
refCache = {}
return [ d.refDeref(self.__stack, self.__inputTools, self.__inputSandbox,
self.__pathFormatter, refCache)
for d in self.__corePackage.directDepSteps ]
def getIndirectDepSteps(self):
"""Return list of indirect dependencies of the package.
Indirect dependencies are dependencies that were provided by downstream
recipes. They are not directly named in the recipe.
"""
refCache = {}
return [ d.refDeref(self.__stack, self.__inputTools, self.__inputSandbox,
self.__pathFormatter, refCache)
for d in self.__corePackage.indirectDepSteps ]
def getAllDepSteps(self, forceSandbox=False):
"""Return list of all dependencies of the package.
This list includes all direct and indirect dependencies. Additionally
the used sandbox and tools are included too."""
# Forcing the sandbox is only allowed if sandboxInvariant policy is not
# set or disabled.
forceSandbox = forceSandbox and \
not self.getRecipe().getRecipeSet().sandboxInvariant
allDeps = set(self.getDirectDepSteps())
allDeps |= set(self.getIndirectDepSteps())
if self.__sandbox and (self.__sandbox.isEnabled() or forceSandbox):
allDeps.add(self.__sandbox.getStep())
for i in self.getPackageStep().getTools().values(): allDeps.add(i.getStep())
return sorted(allDeps)
def _setCheckoutStep(self, checkoutStep):
self.__checkoutStep = checkoutStep
def getCheckoutStep(self):
"""Return the checkout step of this package."""
try:
ret = self.__checkoutStep
except AttributeError:
ret = self.__checkoutStep = CheckoutStep(self.__corePackage.checkoutStep,
self, self.__pathFormatter)
return ret
def _setBuildStep(self, buildStep):
self.__buildStep = buildStep
def getBuildStep(self):
"""Return the build step of this package."""
try:
ret = self.__buildStep
except AttributeError:
ret = self.__buildStep = BuildStep(self.__corePackage.buildStep,
self, self.__pathFormatter)
return ret
def _setPackageStep(self, packageStep):
self.__packageStep = packageStep
def getPackageStep(self):
"""Return the package step of this package."""
try:
ret = self.__packageStep
except AttributeError:
ret = self.__packageStep = PackageStep(self.__corePackage.packageStep,
self, self.__pathFormatter)
return ret
def _getStates(self):
return self.__corePackage.states
def isRelocatable(self):
"""Returns True if the packages is relocatable."""
return self.__corePackage.recipe.isRelocatable()
# FIXME: implement this on our own without the Template class. How to do proper
# escaping?
class IncludeHelper:
def __init__(self, scriptLanguage, fileLoader, baseDir, varBase, sourceName):
self.__pattern = re.compile(r"""
\$<(?:
(?P<escaped>\$) |
(?P<named>[<'][^'>]+)['>]> |
(?P<braced>[<'][^'>]+)['>]> |
(?P<invalid>)
)
""", re.VERBOSE)
self.__resolverClass = scriptLanguage.Resolver
self.__baseDir = baseDir
self.__varBase = re.sub(r'[^a-zA-Z0-9_]', '_', varBase, flags=re.DOTALL)
self.__fileLoader = fileLoader
self.__sourceName = sourceName
def resolve(self, text, section):
if isinstance(text, str):
resolver = self.__resolverClass(self.__fileLoader, self.__baseDir,
text, self.__sourceName, self.__varBase)
t = Template(text)
t.delimiter = '$<'
t.pattern = self.__pattern
try:
ret = t.substitute(resolver)
except ValueError as e:
raise ParseError("Bad substiturion in {}: {}".format(section, str(e)))
return resolver.resolve(ret)
else:
return (None, None)
def mergeFilter(left, right):
if left is None:
return right
if right is None:
return left
return left + right
class ScmValidator:
def __init__(self, scmSpecs):
self.__scmSpecs = scmSpecs
def __validateScm(self, scm):
if 'scm' not in scm:
raise schema.SchemaMissingKeyError("Missing 'scm' key in {}".format(scm), None)
if scm['scm'] not in self.__scmSpecs.keys():
raise schema.SchemaWrongKeyError('Invalid SCM: {}'.format(scm['scm']), None)
return self.__scmSpecs[scm['scm']].validate(scm)
def validate(self, data):
if isinstance(data, dict):
data = [self.__validateScm(data)]
elif isinstance(data, list):
for i in data: self.__validateScm(i)
else:
raise schema.SchemaUnexpectedTypeError(
'checkoutSCM must be an SCM spec or a list thereof',
None)
return data
class VarDefineValidator:
def __init__(self, keyword):
self.__varName = re.compile(r'^[A-Za-z_][A-Za-z0-9_]*$')
self.__keyword = keyword
def validate(self, data):
if not isinstance(data, dict):
raise schema.SchemaUnexpectedTypeError(
"{}: must be a dictionary".format(self.__keyword), None)
for key,value in sorted(data.items()):
if not isinstance(key, str):
raise schema.SchemaUnexpectedTypeError(
"{}: bad variable '{}'. Environment variable names must be strings!"
.format(self.__keyword, key),
None)
if key.startswith("BOB_"):
raise schema.SchemaWrongKeyError(
"{}: bad variable '{}'. Environment variables starting with 'BOB_' are reserved!"
.format(self.__keyword, key),
None)
if self.__varName.match(key) is None:
raise schema.SchemaWrongKeyError(
"{}: bad variable name '{}'.".format(self.__keyword, key),
None)
if not isinstance(value, str):
raise schema.SchemaUnexpectedTypeError(
"{}: bad variable '{}'. Environment variable values must be strings!"
.format(self.__keyword, key),
None)
return data
RECIPE_NAME_SCHEMA = schema.Regex(r'^[0-9A-Za-z_.+-]+$')
MULTIPACKAGE_NAME_SCHEMA = schema.Regex(r'^[0-9A-Za-z_.+-]*$')
class UniquePackageList:
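# Collects package step references, deduplicating them by package name.
# If two references with the same name resolve to different variant-ids
# the errorHandler is invoked with both stacks.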
def __init__(self, stack, errorHandler):
self.stack = stack
self.errorHandler = errorHandler
self.ret = []
self.cache = {}
def append(self, ref):
step = ref.refGetDestination()
name = step.corePackage.getName()
ref2 = self.cache.get(name)
if ref2 is None:
self.cache[name] = ref
self.ret.append(ref)
elif ref2.refGetDestination().variantId != step.variantId:
self.errorHandler(name, self.stack + ref.refGetStack(), self.stack + ref2.refGetStack())
def extend(self, gen):
for i in gen: self.append(i)
def result(self):
return self.ret
class DepTracker:
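# Small helper to track a dependency while resolving a recipe: prime()
# returns True only the first time the dependency is seen and
# useResultOnce() returns True only the first time its result is used.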
__slots__ = ('item', 'isNew', 'usedResult')
def __init__(self, item):
self.item = item
self.isNew = True
self.usedResult = False
def prime(self):
if self.isNew:
self.isNew = False
return True
else:
return False
def useResultOnce(self):
if self.usedResult:
return False
else:
self.usedResult = True
return True
class Recipe(object):
"""Representation of a single recipe
Multiple instances of this class will be created if the recipe uses the
``multiPackage`` keyword. In this case the getName() method will return
the name of the original recipe but the getPackageName() method will return
it with some additional suffix. Without a ``multiPackage`` keyword there will
only be one Recipe instance.
"""
class Dependency(object):
def __init__(self, recipe, env, fwd, use, cond, tools, checkoutDep):
self.recipe = recipe
self.envOverride = env
self.provideGlobal = fwd
self.use = use
self.useEnv = "environment" in self.use
self.useTools = "tools" in self.use
self.useBuildResult = "result" in self.use
self.useDeps = "deps" in self.use
self.useSandbox = "sandbox" in self.use
self.condition = cond
self.toolOverride = tools
self.checkoutDep = checkoutDep
@staticmethod
def __parseEntry(dep, env, fwd, use, cond, tools, checkoutDep):
if isinstance(dep, str):
return [ Recipe.Dependency(dep, env, fwd, use, cond, tools, checkoutDep) ]
else:
envOverride = dep.get("environment")
if envOverride:
env = env.copy()
env.update(envOverride)
toolOverride = dep.get("tools")
if toolOverride:
tools = tools.copy()
tools.update(toolOverride)
fwd = dep.get("forward", fwd)
use = dep.get("use", use)
newCond = dep.get("if")
if newCond is not None:
cond = cond + [newCond] if cond is not None else [ newCond ]
checkoutDep = dep.get("checkoutDep", checkoutDep)
name = dep.get("name")
if name:
if "depends" in dep:
raise ParseError("A dependency must not use 'name' and 'depends' at the same time!")
return [ Recipe.Dependency(name, env, fwd, use, cond, tools, checkoutDep) ]
dependencies = dep.get("depends")
if dependencies is None:
raise ParseError("Either 'name' or 'depends' required for dependencies!")
return Recipe.Dependency.parseEntries(dependencies, env, fwd,
use, cond, tools,
checkoutDep)
@staticmethod
def parseEntries(deps, env={}, fwd=False, use=["result", "deps"],
cond=None, tools={}, checkoutDep=False):
"""Returns an iterator yielding all dependencies as flat list"""
# return flattened list of dependencies
return chain.from_iterable(
Recipe.Dependency.__parseEntry(dep, env, fwd, use, cond, tools,
checkoutDep)
for dep in deps )
@staticmethod
def loadFromFile(recipeSet, layer, rootDir, fileName, properties, fileSchema,
isRecipe, scriptLanguage=None):
# MultiPackages are handled as separate recipes with an anonymous base
# class. Directories are treated as categories separated by '::'.
baseName = os.path.splitext( fileName )[0].split( os.sep )
fileName = os.path.join(rootDir, fileName)
try:
for n in baseName: RECIPE_NAME_SCHEMA.validate(n)
except schema.SchemaError as e:
raise ParseError("Invalid recipe name: '{}'".format(fileName))
baseName = "::".join( baseName )
baseDir = os.path.dirname(fileName)
nameMap = {}
def anonNameCalculator(suffix):
num = nameMap.setdefault(suffix, 0) + 1
nameMap[suffix] = num
return baseName + suffix + "#" + str(num)
def collect(recipe, suffix, anonBaseClass):
if "multiPackage" in recipe:
anonBaseClass = Recipe(recipeSet, recipe, layer, fileName, baseDir,
anonNameCalculator(suffix), baseName, properties, isRecipe,
anonBaseClass)
return chain.from_iterable(
collect(subSpec, suffix + ("-"+subName if subName else ""),
anonBaseClass)
for (subName, subSpec) in recipe["multiPackage"].items() )
else:
packageName = baseName + suffix
return [ Recipe(recipeSet, recipe, layer, fileName, baseDir, packageName,
baseName, properties, isRecipe, anonBaseClass, scriptLanguage) ]
return list(collect(recipeSet.loadYaml(fileName, fileSchema), "", None))
@staticmethod
def createVirtualRoot(recipeSet, roots, properties):
recipe = {
"depends" : [
{ "name" : name, "use" : ["result"] } for name in roots
],
"buildScript" : "true",
"packageScript" : "true"
}
ret = Recipe(recipeSet, recipe, [], "", ".", "", "", properties)
ret.resolveClasses(Env())
return ret
def __init__(self, recipeSet, recipe, layer, sourceFile, baseDir, packageName, baseName,
properties, isRecipe=True, anonBaseClass=None, scriptLanguage=ScriptLanguage.BASH):
self.__recipeSet = recipeSet
self.__sources = [ sourceFile ] if anonBaseClass is None else []
self.__classesResolved = False
self.__inherit = recipe.get("inherit", [])
self.__anonBaseClass = anonBaseClass
self.__defaultScriptLanguage = scriptLanguage
self.__deps = list(Recipe.Dependency.parseEntries(recipe.get("depends", [])))
filt = recipe.get("filter", {})
if filt: warnFilter.warn(baseName)
self.__filterEnv = maybeGlob(filt.get("environment"))
self.__filterTools = maybeGlob(filt.get("tools"))
self.__filterSandbox = maybeGlob(filt.get("sandbox"))
self.__packageName = packageName
self.__baseName = baseName
self.__root = recipe.get("root")
self.__provideTools = { name : AbstractTool(spec)
for (name, spec) in recipe.get("provideTools", {}).items() }
self.__provideVars = recipe.get("provideVars", {})
self.__provideDeps = set(recipe.get("provideDeps", []))
self.__provideSandbox = recipe.get("provideSandbox")
self.__varSelf = recipe.get("environment", {})
self.__varPrivate = recipe.get("privateEnvironment", {})
self.__metaEnv = recipe.get("metaEnvironment", {})
self.__checkoutDeterministic = recipe.get("checkoutDeterministic")
self.__checkoutVars = set(recipe.get("checkoutVars", []))
self.__checkoutVarsWeak = set(recipe.get("checkoutVarsWeak", []))
self.__buildVars = set(recipe.get("buildVars", []))
self.__buildVars |= self.__checkoutVars
self.__buildVarsWeak = set(recipe.get("buildVarsWeak", []))
self.__buildVarsWeak |= self.__checkoutVarsWeak
self.__packageVars = set(recipe.get("packageVars", []))
self.__packageVars |= self.__buildVars
self.__packageVarsWeak = set(recipe.get("packageVarsWeak", []))
self.__packageVarsWeak |= self.__buildVarsWeak
self.__toolDepCheckout = set(recipe.get("checkoutTools", []))
self.__toolDepCheckoutWeak = set(recipe.get("checkoutToolsWeak", []))
self.__toolDepBuild = set(recipe.get("buildTools", []))
self.__toolDepBuild |= self.__toolDepCheckout
self.__toolDepBuildWeak = set(recipe.get("buildToolsWeak", []))
self.__toolDepBuildWeak |= self.__toolDepCheckoutWeak
self.__toolDepPackage = set(recipe.get("packageTools", []))
self.__toolDepPackage |= self.__toolDepBuild
self.__toolDepPackageWeak = set(recipe.get("packageToolsWeak", []))
self.__toolDepPackageWeak |= self.__toolDepBuildWeak
self.__shared = recipe.get("shared")
self.__relocatable = recipe.get("relocatable")
self.__jobServer = recipe.get("jobServer")
self.__properties = {
n : p(n in recipe, recipe.get(n))
for (n, p) in properties.items()
}
self.__corePackagesByMatch = []
self.__corePackagesById = {}
self.__layer = layer
sourceName = ("Recipe " if isRecipe else "Class ") + packageName + (
", layer "+"/".join(layer) if layer else "")
incHelperBash = IncludeHelper(BashLanguage, recipeSet.loadBinary,
baseDir, packageName, sourceName).resolve
incHelperPwsh = IncludeHelper(PwshLanguage, recipeSet.loadBinary,
baseDir, packageName, sourceName).resolve
self.__scriptLanguage = recipe.get("scriptLanguage")
self.__checkout = fetchScripts(recipe, "checkout", incHelperBash, incHelperPwsh)
self.__checkoutSCMs = recipe.get("checkoutSCM", [])
for scm in self.__checkoutSCMs:
scm["__source"] = sourceName
scm["recipe"] = sourceFile
self.__checkoutAsserts = recipe.get("checkoutAssert", [])
i = 0
for a in self.__checkoutAsserts:
a["__source"] = sourceName + ", checkoutAssert #{}".format(i)
i += 1
self.__build = fetchScripts(recipe, "build", incHelperBash, incHelperPwsh)
self.__package = fetchScripts(recipe, "package", incHelperBash, incHelperPwsh)
self.__fingerprintScriptList = fetchFingerprintScripts(recipe)
self.__fingerprintIf = recipe.get("fingerprintIf")
self.__fingerprintVarsList = set(recipe.get("fingerprintVars", []))
self.__buildNetAccess = recipe.get("buildNetAccess")
self.__packageNetAccess = recipe.get("packageNetAccess")
def __resolveClassesOrder(self, cls, stack, visited, isRecipe=False):
# prevent cycles
clsName = "<recipe>" if isRecipe else cls.__packageName
if clsName in stack:
raise ParseError("Cyclic class inheritence: " + " -> ".join(stack + [clsName]))
# depth first
ret = []
subInherit = [ self.__recipeSet.getClass(c) for c in cls.__inherit ]
if cls.__anonBaseClass: subInherit.insert(0, cls.__anonBaseClass)
for c in subInherit:
ret.extend(self.__resolveClassesOrder(c, stack + [clsName], visited))
# classes are inherited only once
if (clsName not in visited) and not isRecipe:
ret.append(cls)
visited.add(clsName)
return ret
def getLayer(self):
"""Get layer to which this recipe belongs.
Returns a list of the layer hierarchy. The root layer is represented
by an empty list. If the recipe belongs to a nested layer the layers
are named from top to bottom. Example:
``layers/foo/layers/bar/recipes/baz.yaml`` -> ``['foo', 'bar']``.
:rtype: List[str]
"""
return self.__layer
def resolveClasses(self, rootEnv):
# must be done only once
if self.__classesResolved: return
self.__classesResolved = True
# calculate order of classes (depth first) but ignore ourself
inherit = self.__resolveClassesOrder(self, [], set(), True)
inheritAll = inherit + [self]
# prepare environment merge list
mergeEnvironment = self.__recipeSet.getPolicy('mergeEnvironment')
if mergeEnvironment:
self.__varSelf = [ self.__varSelf ] if self.__varSelf else []
self.__varPrivate = [ self.__varPrivate ] if self.__varPrivate else []
# first pass: calculate used scripting language
scriptLanguage = None
for cls in reversed(inheritAll):
if scriptLanguage is not None: break
scriptLanguage = cls.__scriptLanguage
if scriptLanguage is None:
self.__scriptLanguage = self.__defaultScriptLanguage
else:
self.__scriptLanguage = scriptLanguage
glue = getLanguage(self.__scriptLanguage).glue
# Consider checkout deterministic by default if no checkoutScript is
# involved. A potential checkoutSetup is ignored.
def coDet(r):
ret = r.__checkoutDeterministic
if ret is not None:
return ret
return r.__checkout[self.__scriptLanguage][1][0] is None
self.__checkoutDeterministic = all(coDet(i) for i in inheritAll)
# merge scripts and other lists
selLang = lambda x: x[self.__scriptLanguage]
# Join all scripts. The result is a tuple with (setupScript, mainScript, digestScript)
self.__checkout = mergeScripts([ selLang(i.__checkout) for i in inheritAll ], glue)
self.__checkoutSCMs = list(chain.from_iterable(i.__checkoutSCMs for i in inheritAll))
self.__checkoutAsserts = list(chain.from_iterable(i.__checkoutAsserts for i in inheritAll))
self.__build = mergeScripts([ selLang(i.__build) for i in inheritAll ], glue)
self.__package = mergeScripts([ selLang(i.__package) for i in inheritAll ], glue)
self.__fingerprintScriptList = [ i.__fingerprintScriptList for i in inheritAll ]
self.__fingerprintVarsList = [ i.__fingerprintVarsList for i in inheritAll ]
self.__fingerprintIf = [ i.__fingerprintIf for i in inheritAll ]
# inherit classes
for cls in reversed(inherit):
self.__sources.extend(cls.__sources)
self.__deps[0:0] = cls.__deps
self.__filterEnv = mergeFilter(self.__filterEnv, cls.__filterEnv)
self.__filterTools = mergeFilter(self.__filterTools, cls.__filterTools)
self.__filterSandbox = mergeFilter(self.__filterSandbox, cls.__filterSandbox)
if self.__root is None: self.__root = cls.__root
if self.__shared is None: self.__shared = cls.__shared
if self.__relocatable is None: self.__relocatable = cls.__relocatable
if self.__jobServer is None: self.__jobServer = cls.__jobServer
tmp = cls.__provideTools.copy()
tmp.update(self.__provideTools)
self.__provideTools = tmp
tmp = cls.__provideVars.copy()
tmp.update(self.__provideVars)
self.__provideVars = tmp
self.__provideDeps |= cls.__provideDeps
if self.__provideSandbox is None: self.__provideSandbox = cls.__provideSandbox
if mergeEnvironment:
if cls.__varSelf: self.__varSelf.insert(0, cls.__varSelf)
if cls.__varPrivate: self.__varPrivate.insert(0, cls.__varPrivate)
else:
tmp = cls.__varSelf.copy()
tmp.update(self.__varSelf)
self.__varSelf = tmp
tmp = cls.__varPrivate.copy()
tmp.update(self.__varPrivate)
self.__varPrivate = tmp
self.__checkoutVars |= cls.__checkoutVars
tmp = cls.__metaEnv.copy()
tmp.update(self.__metaEnv)
self.__metaEnv = tmp
self.__checkoutVarsWeak |= cls.__checkoutVarsWeak
self.__buildVars |= cls.__buildVars
self.__buildVarsWeak |= cls.__buildVarsWeak
self.__packageVars |= cls.__packageVars
self.__packageVarsWeak |= cls.__packageVarsWeak
self.__toolDepCheckout |= cls.__toolDepCheckout
self.__toolDepCheckoutWeak |= cls.__toolDepCheckoutWeak
self.__toolDepBuild |= cls.__toolDepBuild
self.__toolDepBuildWeak |= cls.__toolDepBuildWeak
self.__toolDepPackage |= cls.__toolDepPackage
self.__toolDepPackageWeak |= cls.__toolDepPackageWeak
if self.__buildNetAccess is None: self.__buildNetAccess = cls.__buildNetAccess
if self.__packageNetAccess is None: self.__packageNetAccess = cls.__packageNetAccess
for (n, p) in self.__properties.items():
p.inherit(cls.__properties[n])
# finalize environment merge list
if not mergeEnvironment:
self.__varSelf = [ self.__varSelf ] if self.__varSelf else []
self.__varPrivate = [ self.__varPrivate ] if self.__varPrivate else []
# the package step must always be valid
if self.__package[1] is None:
self.__package = (None, "", 'da39a3ee5e6b4b0d3255bfef95601890afd80709')
# final shared value
self.__shared = self.__shared == True
# Only keep weak tools that are not strong at the same time.
self.__toolDepCheckoutWeak -= self.__toolDepCheckout
self.__toolDepCheckout |= self.__toolDepCheckoutWeak
self.__toolDepBuildWeak -= self.__toolDepBuild
self.__toolDepBuild |= self.__toolDepBuildWeak
self.__toolDepPackageWeak -= self.__toolDepPackage
self.__toolDepPackage |= self.__toolDepPackageWeak
# Either 'relocatable' was set in the recipe/class(es) or it defaults
# to True unless a tool is defined. This was the legacy behaviour
# before Bob 0.14. If the allRelocatable policy is enabled we always
# default to True.
if self.__relocatable is None:
self.__relocatable = self.__recipeSet.getPolicy('allRelocatable') \
or not self.__provideTools
if self.__jobServer is None:
self.__jobServer = False
# check provided dependencies
availDeps = [ d.recipe for d in self.__deps ]
providedDeps = set()
for pattern in self.__provideDeps:
l = set(d for d in availDeps if fnmatch.fnmatchcase(d, pattern))
if not l:
raise ParseError("Unknown dependency '{}' in provideDeps".format(pattern))
providedDeps |= l
self.__provideDeps = providedDeps
# Evaluate root property
if isinstance(self.__root, str) or isinstance(self.__root, IfExpression):
self.__root = rootEnv.evaluate(self.__root, "root")
def getRecipeSet(self):
"""Get the :class:`RecipeSet` to which the recipe belongs"""
return self.__recipeSet
def getSources(self):
return self.__sources
def getPackageName(self):
"""Get the name of the package that is drived from this recipe.
Usually the package name is the same as the recipe name. But in case of
a ``multiPackage`` the package name has an additional suffix.
"""
return self.__packageName
def getName(self):
"""Get plain recipe name.
In case of a ``multiPackage`` multiple packages may be derived from the
same recipe. This method returns the plain recipe name.
"""
return self.__baseName
def getMetaEnv(self):
return self.__metaEnv
def isRoot(self):
"""Returns True if this is a root recipe."""
return self.__root == True
def isRelocatable(self):
"""Returns True if the packages of this recipe are relocatable.
:meta private:
"""
return self.__relocatable
def isShared(self):
return self.__shared
def jobServer(self):
"""Returns True if the jobserver should be used to schedule builds for
this recipe.
:meta private:
"""
return self.__jobServer
def prepare(self, inputEnv, sandboxEnabled, inputStates, inputSandbox=None,
inputTools=Env(), stack=[]):
# already calculated?
for m in self.__corePackagesByMatch:
if m.matches(inputEnv.detach(), inputTools.detach(), inputStates, inputSandbox):
if set(stack) & m.subTreePackages:
raise ParseError("Recipes are cyclic")
m.touch(inputEnv, inputTools)
if DEBUG['pkgck']:
reusedCorePackage = m.corePackage
break
return m.corePackage, m.subTreePackages
else:
reusedCorePackage = None
# Track tool and sandbox changes
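# Ellipsis ('...') is used as a sentinel that is distinct from None: it
# is overwritten with None below when the sandbox is filtered out.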
diffSandbox = ...
diffTools = { }
# make copies because we will modify them
sandbox = inputSandbox
if self.__filterTools is None:
inputTools = inputTools.copy()
else:
oldInputTools = set(inputTools.inspect().keys())
inputTools = inputTools.filter(self.__filterTools)
newInputTools = set(inputTools.inspect().keys())
for t in (oldInputTools - newInputTools): diffTools[t] = None
inputTools.touchReset()
tools = inputTools.derive()
inputEnv = inputEnv.derive()
inputEnv.touchReset()
inputEnv.setFunArgs({ "recipe" : self, "sandbox" : bool(sandbox) and sandboxEnabled,
"__tools" : tools })
env = inputEnv.filter(self.__filterEnv)
for i in self.__varSelf:
env = env.derive({ key : env.substitute(value, "environment::"+key)
for key, value in i.items() })
if sandbox is not None:
name = sandbox.coreStep.corePackage.getName()
if not checkGlobList(name, self.__filterSandbox):
sandbox = None
diffSandbox = None
states = { n : s.copy() for (n,s) in inputStates.items() }
# update plugin states
for s in states.values(): s.onEnter(env, self.__properties)
# traverse dependencies
subTreePackages = set()
directPackages = []
indirectPackages = []
provideDeps = UniquePackageList(stack, self.__raiseIncompatibleProvided)
checkoutDeps = []
results = []
depEnv = env.derive()
depTools = tools.derive()
depSandbox = sandbox
depStates = { n : s.copy() for (n,s) in states.items() }
## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301
"""Invenio WebSearch Administrator Interface."""
__revision__ = "$Id$"
import cgi
import random
import time
import sys
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
from invenio.config import \
CFG_CACHEDIR, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_SITE_URL,\
CFG_WEBCOMMENT_ALLOW_COMMENTS, \
CFG_WEBSEARCH_SHOW_COMMENT_COUNT, \
CFG_WEBCOMMENT_ALLOW_REVIEWS, \
CFG_WEBSEARCH_SHOW_REVIEW_COUNT, \
CFG_BIBRANK_SHOW_CITATION_LINKS, \
CFG_INSPIRE_SITE, \
CFG_CERN_SITE
from invenio.bibrankadminlib import \
write_outcome, \
modify_translations, \
get_def_name, \
get_name, \
get_languages, \
addadminbox, \
tupletotable, \
createhiddenform
from invenio.dbquery import \
run_sql, \
get_table_update_time
from invenio.websearch_external_collections import \
external_collections_dictionary, \
external_collection_sort_engine_by_name, \
external_collection_get_state, \
external_collection_get_update_state_list, \
external_collection_apply_changes
from invenio.websearch_external_collections_utils import \
get_collection_descendants
from invenio.websearch_external_collections_config import CFG_EXTERNAL_COLLECTION_STATES_NAME
#from invenio.bibformat_elements import bfe_references
#from invenio.bibformat_engine import BibFormatObject
from invenio.bibdocfile import BibRecDocs
from invenio.messages import gettext_set_language
#from invenio.bibrank_citation_searcher import get_cited_by
from invenio.access_control_admin import acc_get_action_id
from invenio.access_control_config import VIEWRESTRCOLL
from invenio.errorlib import register_exception
from invenio.intbitset import intbitset
from invenio.bibrank_citation_searcher import get_cited_by_count
from invenio.bibrecord import record_get_field_instances
def getnavtrail(previous = ''):
"""Get the navtrail"""
navtrail = """<a class="navtrail" href="%s/help/admin">Admin Area</a> """ % (CFG_SITE_URL,)
navtrail = navtrail + previous
return navtrail
def fix_collection_scores():
"""
    Re-calculate and re-normalize the scores of the collection relationship.
"""
for id_dad in intbitset(run_sql("SELECT id_dad FROM collection_collection")):
for index, id_son in enumerate(run_sql("SELECT id_son FROM collection_collection WHERE id_dad=%s ORDER BY score DESC", (id_dad, ))):
run_sql("UPDATE collection_collection SET score=%s WHERE id_dad=%s AND id_son=%s", (index * 10 + 10, id_dad, id_son[0]))
def perform_modifytranslations(colID, ln, sel_type='', trans=[], confirm=-1, callback='yes'):
"""Modify the translations of a collection
sel_type - the nametype to modify
trans - the translations in the same order as the languages from get_languages()"""
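    # Hypothetical example: if get_languages() returns [('en', 'English'),
    # ('fr', 'French')], then trans=['Books', 'Livres'] stores 'Books' as the
    # English name and 'Livres' as the French name for the selected nametype.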
output = ''
subtitle = ''
sitelangs = get_languages()
if type(trans) is str:
trans = [trans]
if confirm in ["2", 2] and colID:
finresult = modify_translations(colID, sitelangs, sel_type, trans, "collection")
col_dict = dict(get_def_name('', "collection"))
if colID and col_dict.has_key(int(colID)):
colID = int(colID)
subtitle = """<a name="3">3. Modify translations for collection '%s'</a> <small>[<a href="%s/help/admin/websearch-admin-guide#3.3">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
if sel_type == '':
sel_type = get_col_nametypes()[0][0]
header = ['Language', 'Translation']
actions = []
types = get_col_nametypes()
if len(types) > 1:
text = """
<span class="adminlabel">Name type</span>
<select name="sel_type" class="admin_w200">
"""
for (key, value) in types:
text += """<option value="%s" %s>%s""" % (key, key == sel_type and 'selected="selected"' or '', value)
trans_names = get_name(colID, ln, key, "collection")
if trans_names and trans_names[0][0]:
text += ": %s" % trans_names[0][0]
text += "</option>"
text += """</select>"""
output += createhiddenform(action="modifytranslations#3",
text=text,
button="Select",
colID=colID,
ln=ln,
confirm=0)
if confirm in [-1, "-1", 0, "0"]:
trans = []
for (key, value) in sitelangs:
try:
trans_names = get_name(colID, key, sel_type, "collection")
trans.append(trans_names[0][0])
except StandardError, e:
trans.append('')
for nr in range(0, len(sitelangs)):
actions.append(["%s" % (sitelangs[nr][1],)])
actions[-1].append('<input type="text" name="trans" size="30" value="%s"/>' % trans[nr])
text = tupletotable(header=header, tuple=actions)
output += createhiddenform(action="modifytranslations#3",
text=text,
button="Modify",
colID=colID,
sel_type=sel_type,
ln=ln,
confirm=2)
if sel_type and len(trans) and confirm in ["2", 2]:
output += write_outcome(finresult)
body = [output]
if callback:
return perform_editcollection(colID, ln, "perform_modifytranslations", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_modifyrankmethods(colID, ln, func='', rnkID='', confirm=0, callback='yes'):
"""Modify which rank methods is visible to the collection
func - remove or add rank method
rnkID - the id of the rank method."""
output = ""
subtitle = ""
col_dict = dict(get_def_name('', "collection"))
rnk_dict = dict(get_def_name('', "rnkMETHOD"))
if colID and col_dict.has_key(int(colID)):
colID = int(colID)
if func in ["0", 0] and confirm in ["1", 1]:
finresult = attach_rnk_col(colID, rnkID)
elif func in ["1", 1] and confirm in ["1", 1]:
finresult = detach_rnk_col(colID, rnkID)
subtitle = """<a name="9">9. Modify rank options for collection '%s'</a> <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.9">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
output = """
<dl>
<dt>The rank methods enabled for the collection '%s' is:</dt>
""" % col_dict[colID]
rnkmethods = get_col_rnk(colID, ln)
output += """<dd>"""
if not rnkmethods:
output += """No rank methods"""
else:
for id, name in rnkmethods:
output += """%s, """ % name
output += """</dd>
</dl>
"""
rnk_list = get_def_name('', "rnkMETHOD")
rnk_dict_in_col = dict(get_col_rnk(colID, ln))
rnk_list = filter(lambda x: not rnk_dict_in_col.has_key(x[0]), rnk_list)
if rnk_list:
text = """
<span class="adminlabel">Enable:</span>
<select name="rnkID" class="admin_w200">
<option value="-1">- select rank method -</option>
"""
for (id, name) in rnk_list:
text += """<option value="%s" %s>%s</option>""" % (id, (func in ["0", 0] and confirm in ["0", 0] and int(rnkID) == int(id)) and 'selected="selected"' or '' , name)
text += """</select>"""
output += createhiddenform(action="modifyrankmethods#9",
text=text,
button="Enable",
colID=colID,
ln=ln,
func=0,
confirm=1)
if confirm in ["1", 1] and func in ["0", 0] and int(rnkID) != -1:
output += write_outcome(finresult)
elif confirm not in ["0", 0] and func in ["0", 0]:
output += """<b><span class="info">Please select a rank method.</span></b>"""
coll_list = get_col_rnk(colID, ln)
if coll_list:
text = """
<span class="adminlabel">Disable:</span>
<select name="rnkID" class="admin_w200">
<option value="-1">- select rank method-</option>
"""
for (id, name) in coll_list:
text += """<option value="%s" %s>%s</option>""" % (id, (func in ["1", 1] and confirm in ["0", 0] and int(rnkID) == int(id)) and 'selected="selected"' or '' , name)
text += """</select>"""
output += createhiddenform(action="modifyrankmethods#9",
text=text,
button="Disable",
colID=colID,
ln=ln,
func=1,
confirm=1)
if confirm in ["1", 1] and func in ["1", 1] and int(rnkID) != -1:
output += write_outcome(finresult)
elif confirm not in ["0", 0] and func in ["1", 1]:
output += """<b><span class="info">Please select a rank method.</span></b>"""
body = [output]
if callback:
return perform_editcollection(colID, ln, "perform_modifyrankmethods", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_addcollectiontotree(colID, ln, add_dad='', add_son='', rtype='', mtype='', callback='yes', confirm=-1):
"""Form to add a collection to the tree.
add_dad - the dad to add the collection to
add_son - the collection to add
rtype - add it as a regular or virtual
mtype - add it to the regular or virtual tree."""
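    # rtype 'r' attaches the collection as a regular ("Narrow by...") child,
    # 'v' as a virtual ("Focus on...") child; see the relationship selector
    # rendered further below.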
output = ""
output2 = ""
subtitle = """Attach collection to tree <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#2.2">?</a>]</small>""" % (CFG_SITE_URL)
col_dict = dict(get_def_name('', "collection"))
if confirm not in [-1, "-1"] and not (add_son and add_dad and rtype):
output2 += """<b><span class="info">All fields must be filled.</span></b><br /><br />
"""
elif add_son and add_dad and rtype:
add_son = int(add_son)
add_dad = int(add_dad)
if confirm not in [-1, "-1"]:
if add_son == add_dad:
output2 += """<b><span class="info">Cannot add a collection as a pointer to itself.</span></b><br /><br />
"""
elif check_col(add_dad, add_son):
res = add_col_dad_son(add_dad, add_son, rtype)
output2 += write_outcome(res)
if res[0] == 1:
output2 += """<b><span class="info"><br /> The collection will appear on your website after the next webcoll run. You can either run it manually or wait until bibsched does it for you.</span></b><br /><br />
"""
else:
output2 += """<b><span class="info">Cannot add the collection '%s' as a %s subcollection of '%s' since it will either create a loop, or the association already exists.</span></b><br /><br />
""" % (col_dict[add_son], (rtype=="r" and 'regular' or 'virtual'), col_dict[add_dad])
add_son = ''
add_dad = ''
rtype = ''
tree = get_col_tree(colID)
col_list = col_dict.items()
col_list.sort(compare_on_val)
output = show_coll_not_in_tree(colID, ln, col_dict)
text = """
<span class="adminlabel">Attach collection:</span>
<select name="add_son" class="admin_w200">
<option value="">- select collection -</option>
"""
for (id, name) in col_list:
if id != colID:
text += """<option value="%s" %s>%s</option>""" % (id, str(id)==str(add_son) and 'selected="selected"' or '', name)
text += """
</select><br />
<span class="adminlabel">to parent collection:</span>
<select name="add_dad" class="admin_w200">
<option value="">- select parent collection -</option>
"""
for (id, name) in col_list:
text += """<option value="%s" %s>%s</option>
""" % (id, str(id)==add_dad and 'selected="selected"' or '', name)
text += """</select><br />
"""
text += """
<span class="adminlabel">with relationship:</span>
<select name="rtype" class="admin_w200">
<option value="">- select relationship -</option>
<option value="r" %s>Regular (Narrow by...)</option>
<option value="v" %s>Virtual (Focus on...)</option>
</select>
""" % ((rtype=="r" and 'selected="selected"' or ''), (rtype=="v" and 'selected="selected"' or ''))
output += createhiddenform(action="%s/admin/websearch/websearchadmin.py/addcollectiontotree" % CFG_SITE_URL,
text=text,
button="Add",
colID=colID,
ln=ln,
confirm=1)
output += output2
#output += perform_showtree(colID, ln)
body = [output]
if callback:
return perform_index(colID, ln, mtype="perform_addcollectiontotree", content=addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_addcollection(colID, ln, colNAME='', dbquery='', callback="yes", confirm=-1):
"""form to add a new collection.
colNAME - the name of the new collection
dbquery - the dbquery of the new collection"""
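    # Note: the dbquery parameter is effectively unused here; the collection is
    # created with an empty query (add_col(colNAME, '')) and the query can be
    # set afterwards via perform_modifydbquery.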
output = ""
subtitle = """Create new collection <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#2.1">?</a>]</small>""" % (CFG_SITE_URL)
text = """
<span class="adminlabel">Default name</span>
<input class="admin_w200" type="text" name="colNAME" value="%s" /><br />
""" % colNAME
output = createhiddenform(action="%s/admin/websearch/websearchadmin.py/addcollection" % CFG_SITE_URL,
text=text,
colID=colID,
ln=ln,
button="Add collection",
confirm=1)
if colNAME and confirm in ["1", 1]:
res = add_col(colNAME, '')
output += write_outcome(res)
if res[0] == 1:
output += perform_addcollectiontotree(colID=colID, ln=ln, add_son=res[1], callback='')
elif confirm not in ["-1", -1]:
output += """<b><span class="info">Please give the collection a name.</span></b>"""
body = [output]
if callback:
return perform_index(colID, ln=ln, mtype="perform_addcollection", content=addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_modifydbquery(colID, ln, dbquery='', callback='yes', confirm=-1):
"""form to modify the dbquery of the collection.
dbquery - the dbquery of the collection."""
subtitle = ''
output = ""
col_dict = dict(get_def_name('', "collection"))
if colID and col_dict.has_key(int(colID)):
colID = int(colID)
subtitle = """<a name="1">1. Modify collection query for collection '%s'</a> <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.1">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
if confirm == -1:
res = run_sql("SELECT dbquery FROM collection WHERE id=%s" % colID)
dbquery = res[0][0]
if not dbquery:
dbquery = ''
reg_sons = len(get_col_tree(colID, 'r'))
vir_sons = len(get_col_tree(colID, 'v'))
if reg_sons > 1:
if dbquery:
output += "Warning: This collection got subcollections, and should because of this not have a collection query, for further explanation, check the WebSearch Guide<br />"
elif reg_sons <= 1:
if not dbquery:
output += "Warning: This collection does not have any subcollections, and should because of this have a collection query, for further explanation, check the WebSearch Guide<br />"
text = """
<span class="adminlabel">Query</span>
<input class="admin_w200" type="text" name="dbquery" value="%s" /><br />
""" % cgi.escape(dbquery, 1)
output += createhiddenform(action="modifydbquery",
text=text,
button="Modify",
colID=colID,
ln=ln,
confirm=1)
if confirm in ["1", 1]:
res = modify_dbquery(colID, dbquery)
if res:
if dbquery == "":
text = """<b><span class="info">Query removed for this collection.</span></b>"""
else:
text = """<b><span class="info">Query set for this collection.</span></b>"""
else:
text = """<b><span class="info">Sorry, could not change query.</span></b>"""
output += text
body = [output]
if callback:
return perform_editcollection(colID, ln, "perform_modifydbquery", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_modifycollectiontree(colID, ln, move_up='', move_down='', move_from='', move_to='', delete='', rtype='', callback='yes', confirm=0):
"""to modify the collection tree: move a collection up and down, delete a collection, or change the father of the collection.
colID - the main collection of the tree, the root
move_up - move this collection up (is not the collection id, but the place in the tree)
    move_down - move this collection down (is not the collection id, but the place in the tree)
    move_from - move this collection from the current position (is not the collection id, but the place in the tree)
    move_to - move the move_from collection and set this as its father. (is not the collection id, but the place in the tree)
delete - delete this collection from the tree (is not the collection id, but the place in the tree)
rtype - the type of the collection in the tree, regular or virtual"""
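    # Hypothetical example: with rtype='r', move_from='r3' and move_to='r1' the
    # collection at position 3 of the regular tree is re-attached under the
    # collection at position 1; the leading 'r'/'v' of move_from/move_to
    # encodes which tree the position refers to, and the digits are positions
    # in that tree, not collection ids.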
colID = int(colID)
tree = get_col_tree(colID, rtype)
col_dict = dict(get_def_name('', "collection"))
subtitle = """Modify collection tree: %s <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#2.3">?</a>] <a href="%s/admin/websearch/websearchadmin.py/showtree?colID=%s&ln=%s">Printer friendly version</a></small>""" % (col_dict[colID], CFG_SITE_URL, CFG_SITE_URL, colID, ln)
fin_output = ""
output = ""
try:
if move_up:
move_up = int(move_up)
switch = find_last(tree, move_up)
if switch and switch_col_treescore(tree[move_up], tree[switch]):
output += """<b><span class="info">Moved the %s collection '%s' up and '%s' down.</span></b><br /><br />
""" % ((rtype=="r" and 'regular' or 'virtual'), col_dict[tree[move_up][0]], col_dict[tree[switch][0]])
else:
output += """<b><span class="info">Could not move the %s collection '%s' up and '%s' down.</span></b><br /><br />
""" % ((rtype=="r" and 'regular' or 'virtual'), col_dict[tree[move_up][0]], col_dict[tree[switch][0]])
elif move_down:
move_down = int(move_down)
switch = find_next(tree, move_down)
if switch and switch_col_treescore(tree[move_down], tree[switch]):
output += """<b><span class="info">Moved the %s collection '%s' down and '%s' up.</span></b><br /><br />
""" % ((rtype=="r" and 'regular' or 'virtual'), col_dict[tree[move_down][0]], col_dict[tree[switch][0]])
else:
output += """<b><span class="info">Could not move the %s collection '%s' up and '%s' down.</span></b><br /><br />
""" % ((rtype=="r" and 'regular' or 'virtual'), col_dict[tree[move_up][0]],col_dict[tree[switch][0]])
elif delete:
delete = int(delete)
if confirm in [0, "0"]:
if col_dict[tree[delete][0]] != col_dict[tree[delete][3]]:
text = """<b>Do you want to remove the %s collection '%s' and its subcollections in the %s collection '%s'.</b>
""" % ((tree[delete][4]=="r" and 'regular' or 'virtual'), col_dict[tree[delete][0]], (rtype=="r" and 'regular' or 'virtual'), col_dict[tree[delete][3]])
else:
text = """<b>Do you want to remove all subcollections of the %s collection '%s'.</b>
""" % ((rtype=="r" and 'regular' or 'virtual'), col_dict[tree[delete][3]])
output += createhiddenform(action="%s/admin/websearch/websearchadmin.py/modifycollectiontree#tree" % CFG_SITE_URL,
text=text,
button="Confirm",
colID=colID,
delete=delete,
rtype=rtype,
ln=ln,
confirm=1)
output += createhiddenform(action="%s/admin/websearch/websearchadmin.py/index?mtype=perform_modifycollectiontree#tree" % CFG_SITE_URL,
text="<b>To cancel</b>",
button="Cancel",
colID=colID,
ln=ln)
else:
if remove_col_subcol(tree[delete][0], tree[delete][3], rtype):
if col_dict[tree[delete][0]] != col_dict[tree[delete][3]]:
output += """<b><span class="info">Removed the %s collection '%s' and its subcollections in subdirectory '%s'.</span></b><br /><br />
""" % ((tree[delete][4]=="r" and 'regular' or 'virtual'), col_dict[tree[delete][0]], col_dict[tree[delete][3]])
else:
output += """<b><span class="info">Removed the subcollections of the %s collection '%s'.</span></b><br /><br />
""" % ((rtype=="r" and 'regular' or 'virtual'), col_dict[tree[delete][3]])
else:
output += """<b><span class="info">Could not remove the collection from the tree.</span></b><br /><br />
"""
delete = ''
elif move_from and not move_to:
move_from_rtype = move_from[0]
move_from_id = int(move_from[1:len(move_from)])
text = """<b>Select collection to place the %s collection '%s' under.</b><br /><br />
""" % ((move_from_rtype=="r" and 'regular' or 'virtual'), col_dict[tree[move_from_id][0]])
output += createhiddenform(action="%s/admin/websearch/websearchadmin.py/index?mtype=perform_modifycollectiontree#tree" % CFG_SITE_URL,
text=text,
button="Cancel",
colID=colID,
ln=ln)
elif move_from and move_to:
move_from_rtype = move_from[0]
move_from_id = int(move_from[1:len(move_from)])
move_to_rtype = move_to[0]
move_to_id = int(move_to[1:len(move_to)])
tree_from = get_col_tree(colID, move_from_rtype)
tree_to = get_col_tree(colID, move_to_rtype)
if confirm in [0, '0']:
if move_from_id == move_to_id and move_from_rtype == move_to_rtype:
output += """<b><span class="info">Cannot move to itself.</span></b><br /><br />
"""
elif tree_from[move_from_id][3] == tree_to[move_to_id][0] and move_from_rtype==move_to_rtype:
output += """<b><span class="info">The collection is already there.</span></b><br /><br />
"""
elif check_col(tree_to[move_to_id][0], tree_from[move_from_id][0]) or (tree_to[move_to_id][0] == 1 and tree_from[move_from_id][3] == tree_to[move_to_id][0] and move_from_rtype != move_to_rtype):
text = """<b>Move %s collection '%s' to the %s collection '%s'.</b>
""" % ((tree_from[move_from_id][4]=="r" and 'regular' or 'virtual'), col_dict[tree_from[move_from_id][0]], (tree_to[move_to_id][4]=="r" and 'regular' or 'virtual'), col_dict[tree_to[move_to_id][0]])
output += createhiddenform(action="%s/admin/websearch/websearchadmin.py/modifycollectiontree#tree" % CFG_SITE_URL,
text=text,
button="Confirm",
colID=colID,
move_from=move_from,
move_to=move_to,
ln=ln,
rtype=rtype,
confirm=1)
output += createhiddenform(action="%s/admin/websearch/websearchadmin.py/index?mtype=perform_modifycollectiontree#tree" % CFG_SITE_URL,
text="""<b>To cancel</b>""",
button="Cancel",
colID=colID,
ln=ln)
else:
output += """<b><span class="info">Cannot move the collection '%s' and set it as a subcollection of '%s' since it will create a loop.</span></b><br /><br />
""" % (col_dict[tree_from[move_from_id][0]], col_dict[tree_to[move_to_id][0]])
else:
if (move_to_id != 0 and move_col_tree(tree_from[move_from_id], tree_to[move_to_id])) or (move_to_id == 0 and move_col_tree(tree_from[move_from_id], tree_to[move_to_id], move_to_rtype)):
output += """<b><span class="info">Moved %s collection '%s' to the %s collection '%s'.</span></b><br /><br />
""" % ((move_from_rtype=="r" and 'regular' or 'virtual'), col_dict[tree_from[move_from_id][0]], (move_to_rtype=="r" and 'regular' or 'virtual'), col_dict[tree_to[move_to_id][0]])
else:
output += """<b><span class="info">Could not move %s collection '%s' to the %s collection '%s'.</span></b><br /><br />
""" % ((move_from_rtype=="r" and 'regular' or 'virtual'), col_dict[tree_from[move_from_id][0]], (move_to_rtype=="r" and 'regular' or 'virtual'), col_dict[tree_to[move_to_id][0]])
move_from = ''
move_to = ''
else:
output += """
"""
except StandardError, e:
register_exception()
return """<b><span class="info">An error occured.</span></b>
"""
output += """<table border ="0" width="100%">
<tr><td width="50%">
<b>Narrow by collection:</b>
</td><td width="50%">
<b>Focus on...:</b>
</td></tr><tr><td valign="top">
"""
tree = get_col_tree(colID, 'r')
output += create_colltree(tree, col_dict, colID, ln, move_from, move_to, 'r', "yes")
output += """</td><td valign="top">
"""
tree = get_col_tree(colID, 'v')
output += create_colltree(tree, col_dict, colID, ln, move_from, move_to, 'v', "yes")
output += """</td>
</tr>
</table>
"""
body = [output]
if callback:
return perform_index(colID, ln, mtype="perform_modifycollectiontree", content=addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_showtree(colID, ln):
"""create collection tree/hiarchy"""
col_dict = dict(get_def_name('', "collection"))
subtitle = "Collection tree: %s" % col_dict[int(colID)]
output = """<table border ="0" width="100%">
<tr><td width="50%">
<b>Narrow by collection:</b>
</td><td width="50%">
<b>Focus on...:</b>
</td></tr><tr><td valign="top">
"""
tree = get_col_tree(colID, 'r')
output += create_colltree(tree, col_dict, colID, ln, '', '', 'r', '')
output += """</td><td valign="top">
"""
tree = get_col_tree(colID, 'v')
output += create_colltree(tree, col_dict, colID, ln, '', '', 'v', '')
output += """</td>
</tr>
</table>
"""
body = [output]
return addadminbox(subtitle, body)
def perform_addportalbox(colID, ln, title='', body='', callback='yes', confirm=-1):
"""form to add a new portalbox
title - the title of the portalbox
body - the body of the portalbox"""
col_dict = dict(get_def_name('', "collection"))
colID = int(colID)
subtitle = """<a name="5.1"></a>Create new portalbox"""
text = """
<span class="adminlabel">Title</span>
<textarea cols="50" rows="1" class="admin_wvar" type="text" name="title">%s</textarea><br />
<span class="adminlabel">Body</span>
<textarea cols="50" rows="10" class="admin_wvar" type="text" name="body">%s</textarea><br />
""" % (cgi.escape(title), cgi.escape(body))
output = createhiddenform(action="addportalbox#5.1",
text=text,
button="Add",
colID=colID,
ln=ln,
confirm=1)
if body and confirm in [1, "1"]:
res = add_pbx(title, body)
output += write_outcome(res)
        if res[0] == 1:
output += """<b><span class="info"><a href="addexistingportalbox?colID=%s&ln=%s&pbxID=%s#5">Add portalbox to collection</a></span></b>""" % (colID, ln, res[1])
elif confirm not in [-1, "-1"]:
output += """<b><span class="info">Body field must be filled.</span></b>
"""
body = [output]
return perform_showportalboxes(colID, ln, content=addadminbox(subtitle, body))
def perform_addexistingportalbox(colID, ln, pbxID=-1, score=0, position='', sel_ln='', callback='yes', confirm=-1):
"""form to add an existing portalbox to a collection.
colID - the collection to add the portalbox to
pbxID - the portalbox to add
score - the importance of the portalbox.
position - the position of the portalbox on the page
sel_ln - the language of the portalbox"""
subtitle = """<a name="5.2"></a>Add existing portalbox to collection"""
output = ""
colID = int(colID)
res = get_pbx()
pos = get_pbx_pos()
lang = dict(get_languages())
col_dict = dict(get_def_name('', "collection"))
pbx_dict = dict(map(lambda x: (x[0], x[1]), res))
col_pbx = get_col_pbx(colID)
col_pbx = dict(map(lambda x: (x[0], x[5]), col_pbx))
if len(res) > 0:
text = """
<span class="adminlabel">Portalbox</span>
<select name="pbxID" class="admin_w200">
<option value="-1">- Select portalbox -</option>
"""
for (id, t_title, t_body) in res:
text += """<option value="%s" %s>%s - %s...</option>\n""" % \
(id, id == int(pbxID) and 'selected="selected"' or '',
t_title[:40], cgi.escape(t_body[0:40 - min(40, len(t_title))]))
text += """</select><br />
<span class="adminlabel">Language</span>
<select name="sel_ln" class="admin_w200">
<option value="">- Select language -</option>
"""
listlang = lang.items()
listlang.sort()
for (key, name) in listlang:
text += """<option value="%s" %s>%s</option>
""" % (key, key == sel_ln and 'selected="selected"' or '', name)
text += """</select><br />
<span class="adminlabel">Position</span>
<select name="position" class="admin_w200">
<option value="">- Select position -</option>
"""
listpos = pos.items()
listpos.sort()
for (key, name) in listpos:
text += """<option value="%s" %s>%s</option>""" % (key, key==position and 'selected="selected"' or '', name)
text += "</select>"
output += createhiddenform(action="addexistingportalbox#5.2",
text=text,
button="Add",
colID=colID,
ln=ln,
confirm=1)
else:
output = """No existing portalboxes to add, please create a new one.
"""
if pbxID > -1 and position and sel_ln and confirm in [1, "1"]:
pbxID = int(pbxID)
res = add_col_pbx(colID, pbxID, sel_ln, position, '')
output += write_outcome(res)
elif pbxID > -1 and confirm not in [-1, "-1"]:
output += """<b><span class="info">All fields must be filled.</span></b>
"""
body = [output]
output = "<br />" + addadminbox(subtitle, body)
return perform_showportalboxes(colID, ln, content=output)
def perform_deleteportalbox(colID, ln, pbxID=-1, callback='yes', confirm=-1):
"""form to delete a portalbox which is not in use.
colID - the current collection.
pbxID - the id of the portalbox"""
subtitle = """<a name="5.3"></a>Delete an unused portalbox"""
output = ""
colID = int(colID)
if pbxID not in [-1, "-1"] and confirm in [1, "1"]:
ares = get_pbx()
pbx_dict = dict(map(lambda x: (x[0], x[1]), ares))
if pbx_dict.has_key(int(pbxID)):
pname = pbx_dict[int(pbxID)]
ares = delete_pbx(int(pbxID))
else:
return """<b><span class="info">This portalbox does not exist</span></b>"""
res = get_pbx()
col_dict = dict(get_def_name('', "collection"))
pbx_dict = dict(map(lambda x: (x[0], x[1]), res))
col_pbx = get_col_pbx()
col_pbx = dict(map(lambda x: (x[0], x[5]), col_pbx))
if len(res) > 0:
text = """
<span class="adminlabel">Portalbox</span>
<select name="pbxID" class="admin_w200">
"""
text += """<option value="-1">- Select portalbox -"""
for (id, t_title, t_body) in res:
if not col_pbx.has_key(id):
text += """<option value="%s" %s>%s - %s...""" % (id, id == int(pbxID) and 'selected="selected"' or '', t_title, cgi.escape(t_body[0:10]))
text += "</option>"
text += """</select><br />"""
output += createhiddenform(action="deleteportalbox#5.3",
text=text,
button="Delete",
colID=colID,
ln=ln,
confirm=1)
if pbxID not in [-1, "-1"]:
pbxID = int(pbxID)
if confirm in [1, "1"]:
output += write_outcome(ares)
elif confirm not in [-1, "-1"]:
output += """<b><span class="info">Choose a portalbox to delete.</span></b>
"""
body = [output]
output = "<br />" + addadminbox(subtitle, body)
return perform_showportalboxes(colID, ln, content=output)
def perform_modifyportalbox(colID, ln, pbxID=-1, score='', position='', sel_ln='', title='', body='', callback='yes', confirm=-1):
"""form to modify a portalbox in a collection, or change the portalbox itself.
colID - the id of the collection.
pbxID - the portalbox to change
score - the score of the portalbox connected to colID which should be changed.
position - the position of the portalbox in collection colID to change."""
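    # The form below is submitted in two steps: confirm=3 updates the
    # collection-specific placement (score/position via modify_pbx with empty
    # title/body), while confirm=4 updates the shared title/body of the
    # portalbox itself, which affects every collection that uses it.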
subtitle = ""
output = ""
colID = int(colID)
res = get_pbx()
pos = get_pbx_pos()
lang = dict(get_languages())
col_dict = dict(get_def_name('', "collection"))
pbx_dict = dict(map(lambda x: (x[0], x[1]), res))
col_pbx = get_col_pbx(colID)
col_pbx = dict(map(lambda x: (x[0], x[5]), col_pbx))
if pbxID not in [-1, "-1"]:
pbxID = int(pbxID)
subtitle = """<a name="5.4"></a>Modify portalbox '%s' for this collection""" % pbx_dict[pbxID]
col_pbx = get_col_pbx(colID)
if not (score and position) and not (body and title):
for (id_pbx, id_collection, tln, score, position, title, body) in col_pbx:
if id_pbx == pbxID:
break
output += """Collection (presentation) specific values (Changes implies only to this collection.)<br />"""
text = """
<span class="adminlabel">Position</span>
<select name="position" class="admin_w200">
"""
listpos = pos.items()
listpos.sort()
for (key, name) in listpos:
text += """<option value="%s" %s>%s""" % (key, key==position and 'selected="selected"' or '', name)
text += "</option>"
text += """</select><br />"""
output += createhiddenform(action="modifyportalbox#5.4",
text=text,
button="Modify",
colID=colID,
pbxID=pbxID,
score=score,
title=title,
body=cgi.escape(body, 1),
sel_ln=sel_ln,
ln=ln,
confirm=3)
if pbxID > -1 and score and position and confirm in [3, "3"]:
pbxID = int(pbxID)
res = modify_pbx(colID, pbxID, sel_ln, score, position, '', '')
res2 = get_pbx()
pbx_dict = dict(map(lambda x: (x[0], x[1]), res2))
output += write_outcome(res)
output += """<br />Portalbox (content) specific values (any changes appears everywhere the portalbox is used.)"""
text = """
<span class="adminlabel">Title</span>
<textarea cols="50" rows="1" class="admin_wvar" type="text" name="title">%s</textarea><br />
""" % cgi.escape(title)
text += """
<span class="adminlabel">Body</span>
<textarea cols="50" rows="10" class="admin_wvar" type="text" name="body">%s</textarea><br />
""" % cgi.escape(body)
output += createhiddenform(action="modifyportalbox#5.4",
text=text,
button="Modify",
colID=colID,
pbxID=pbxID,
sel_ln=sel_ln,
score=score,
position=position,
ln=ln,
confirm=4)
if pbxID > -1 and confirm in [4, "4"]:
pbxID = int(pbxID)
res = modify_pbx(colID, pbxID, sel_ln, '', '', title, body)
output += write_outcome(res)
else:
output = """No portalbox to modify."""
body = [output]
output = "<br />" + addadminbox(subtitle, body)
return perform_showportalboxes(colID, ln, content=output)
def perform_switchpbxscore(colID, id_1, id_2, sel_ln, ln):
"""Switch the score of id_1 and id_2 in collection_portalbox.
colID - the current collection
id_1/id_2 - the id's to change the score for.
sel_ln - the language of the portalbox"""
output = ""
res = get_pbx()
pbx_dict = dict(map(lambda x: (x[0], x[1]), res))
res = switch_pbx_score(colID, id_1, id_2, sel_ln)
output += write_outcome(res)
return perform_showportalboxes(colID, ln, content=output)
def perform_showportalboxes(colID, ln, callback='yes', content='', confirm=-1):
"""show the portalboxes of this collection.
colID - the portalboxes to show the collection for."""
colID = int(colID)
col_dict = dict(get_def_name('', "collection"))
subtitle = """<a name="5">5. Modify portalboxes for collection '%s'</a> <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.5">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
output = ""
pos = get_pbx_pos()
output = """<dl>
<dt>Portalbox actions (not related to this collection)</dt>
<dd><a href="addportalbox?colID=%s&ln=%s#5.1">Create new portalbox</a></dd>
<dd><a href="deleteportalbox?colID=%s&ln=%s#5.3">Delete an unused portalbox</a></dd>
<dt>Collection specific actions</dt>
<dd><a href="addexistingportalbox?colID=%s&ln=%s#5.2">Add existing portalbox to collection</a></dd>
</dl>
""" % (colID, ln, colID, ln, colID, ln)
header = ['Position', 'Language', '', 'Title', 'Actions']
actions = []
sitelangs = get_languages()
lang = dict(sitelangs)
pos_list = pos.items()
pos_list.sort()
if len(get_col_pbx(colID)) > 0:
for (key, value) in sitelangs:
for (pos_key, pos_value) in pos_list:
res = get_col_pbx(colID, key, pos_key)
i = 0
for (pbxID, colID_pbx, tln, score, position, title, body) in res:
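                    # The up/down arrows swap this portalbox's score with the
                    # previous (res[i - 1]) or next (res[i]) entry via
                    # switchpbxscore; the random 'rand' query parameter is
                    # presumably there to defeat browser caching of the page.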
move = """<table cellspacing="1" cellpadding="0" border="0"><tr><td>"""
if i != 0:
move += """<a href="%s/admin/websearch/websearchadmin.py/switchpbxscore?colID=%s&ln=%s&id_1=%s&id_2=%s&sel_ln=%s&rand=%s#5"><img border="0" src="%s/img/smallup.gif" title="Move portalbox up" alt="up" /></a>""" % (CFG_SITE_URL, colID, ln, pbxID, res[i - 1][0], tln, random.randint(0, 1000), CFG_SITE_URL)
else:
move += " "
move += "</td><td>"
i += 1
if i != len(res):
move += """<a href="%s/admin/websearch/websearchadmin.py/switchpbxscore?colID=%s&ln=%s&id_1=%s&id_2=%s&sel_ln=%s&rand=%s#5"><img border="0" src="%s/img/smalldown.gif" title="Move portalbox down" alt="down" /></a>""" % (CFG_SITE_URL, colID, ln, pbxID, res[i][0], tln, random.randint(0, 1000), CFG_SITE_URL)
move += """</td></tr></table>"""
actions.append(["%s" % (i==1 and pos[position] or ''), "%s" % (i==1 and lang[tln] or ''), move, "%s" % title])
for col in [(('Modify', 'modifyportalbox'), ('Remove', 'removeportalbox'),)]:
actions[-1].append('<a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&pbxID=%s&sel_ln=%s#5.4">%s</a>' % (CFG_SITE_URL, col[0][1], colID, ln, pbxID, tln, col[0][0]))
for (str, function) in col[1:]:
actions[-1][-1] += ' / <a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&pbxID=%s&sel_ln=%s#5.5">%s</a>' % (CFG_SITE_URL, function, colID, ln, pbxID, tln, str)
output += tupletotable(header=header, tuple=actions)
else:
output += """No portalboxes exists for this collection"""
output += content
body = [output]
if callback:
return perform_editcollection(colID, ln, "perform_showportalboxes", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_removeportalbox(colID, ln, pbxID='', sel_ln='', callback='yes', confirm=0):
"""form to remove a portalbox from a collection.
colID - the current collection, remove the portalbox from this collection.
sel_ln - remove the portalbox with this language
pbxID - remove the portalbox with this id"""
subtitle = """<a name="5.5"></a>Remove portalbox"""
output = ""
col_dict = dict(get_def_name('', "collection"))
res = get_pbx()
pbx_dict = dict(map(lambda x: (x[0], x[1]), res))
if colID and pbxID and sel_ln:
colID = int(colID)
pbxID = int(pbxID)
if confirm in ["0", 0]:
text = """Do you want to remove the portalbox '%s' from the collection '%s'.""" % (pbx_dict[pbxID], col_dict[colID])
output += createhiddenform(action="removeportalbox#5.5",
text=text,
button="Confirm",
colID=colID,
pbxID=pbxID,
sel_ln=sel_ln,
confirm=1)
elif confirm in ["1", 1]:
res = remove_pbx(colID, pbxID, sel_ln)
output += write_outcome(res)
body = [output]
output = "<br />" + addadminbox(subtitle, body)
return perform_showportalboxes(colID, ln, content=output)
def perform_switchfmtscore(colID, type, id_1, id_2, ln):
"""Switch the score of id_1 and id_2 in the table type.
colID - the current collection
id_1/id_2 - the id's to change the score for.
type - like "format" """
fmt_dict = dict(get_def_name('', "format"))
res = switch_score(colID, id_1, id_2, type)
output = write_outcome(res)
return perform_showoutputformats(colID, ln, content=output)
def perform_switchfldscore(colID, id_1, id_2, fmeth, ln):
"""Switch the score of id_1 and id_2 in collection_field_fieldvalue.
colID - the current collection
id_1/id_2 - the id's to change the score for."""
fld_dict = dict(get_def_name('', "field"))
res = switch_fld_score(colID, id_1, id_2)
output = write_outcome(res)
if fmeth == "soo":
return perform_showsortoptions(colID, ln, content=output)
elif fmeth == "sew":
return perform_showsearchfields(colID, ln, content=output)
elif fmeth == "seo":
return perform_showsearchoptions(colID, ln, content=output)
def perform_switchfldvaluescore(colID, id_1, id_fldvalue_1, id_fldvalue_2, ln):
"""Switch the score of id_1 and id_2 in collection_field_fieldvalue.
colID - the current collection
id_1/id_2 - the id's to change the score for."""
name_1 = run_sql("SELECT name from fieldvalue where id=%s", (id_fldvalue_1, ))[0][0]
name_2 = run_sql("SELECT name from fieldvalue where id=%s", (id_fldvalue_2, ))[0][0]
res = switch_fld_value_score(colID, id_1, id_fldvalue_1, id_fldvalue_2)
output = write_outcome(res)
return perform_modifyfield(colID, fldID=id_1, ln=ln, content=output)
def perform_addnewfieldvalue(colID, fldID, ln, name='', value='', callback="yes", confirm=-1):
"""form to add a new fieldvalue.
name - the name of the new fieldvalue
value - the value of the new fieldvalue
"""
output = ""
subtitle = """<a name="7.4"></a>Add new value"""
text = """
<span class="adminlabel">Display name</span>
<input class="admin_w200" type="text" name="name" value="%s" /><br />
<span class="adminlabel">Search value</span>
<input class="admin_w200" type="text" name="value" value="%s" /><br />
""" % (name, value)
output = createhiddenform(action="%s/admin/websearch/websearchadmin.py/addnewfieldvalue" % CFG_SITE_URL,
text=text,
colID=colID,
fldID=fldID,
ln=ln,
button="Add",
confirm=1)
if name and value and confirm in ["1", 1]:
res = add_fldv(name, value)
output += write_outcome(res)
if res[0] == 1:
res = add_col_fld(colID, fldID, 'seo', res[1])
if res[0] == 0:
output += "<br />" + write_outcome(res)
elif confirm not in ["-1", -1]:
output += """<b><span class="info">Please fill in name and value.</span></b>
"""
body = [output]
output = "<br />" + addadminbox(subtitle, body)
return perform_modifyfield(colID, fldID=fldID, ln=ln, content=output)
def perform_modifyfieldvalue(colID, fldID, fldvID, ln, name='', value='', callback="yes", confirm=-1):
"""form to modify a fieldvalue.
name - the name of the fieldvalue
value - the value of the fieldvalue
"""
if confirm in [-1, "-1"]:
res = get_fld_value(fldvID)
(id, name, value) = res[0]
output = ""
subtitle = """<a name="7.4"></a>Modify existing value"""
output = """<dl>
<dt><b><span class="info">Warning: Modifications done below will also inflict on all places the modified data is used.</span></b></dt>
</dl>"""
text = """
<span class="adminlabel">Display name</span>
<input class="admin_w200" type="text" name="name" value="%s" /><br />
<span class="adminlabel">Search value</span>
<input class="admin_w200" type="text" name="value" value="%s" /><br />
""" % (name, value)
output += createhiddenform(action="%s/admin/websearch/websearchadmin.py/modifyfieldvalue" % CFG_SITE_URL,
text=text,
colID=colID,
fldID=fldID,
fldvID=fldvID,
ln=ln,
button="Update",
confirm=1)
output += createhiddenform(action="%s/admin/websearch/websearchadmin.py/modifyfieldvalue" % CFG_SITE_URL,
text="Delete value and all associations",
colID=colID,
fldID=fldID,
fldvID=fldvID,
ln=ln,
button="Delete",
confirm=2)
if name and value and confirm in ["1", 1]:
res = update_fldv(fldvID, name, value)
output += write_outcome(res)
#if res:
# output += """<b><span class="info">Operation successfully completed.</span></b>"""
#else:
# output += """<b><span class="info">Operation failed.</span></b>"""
elif confirm in ["2", 2]:
res = delete_fldv(fldvID)
output += write_outcome(res)
elif confirm not in ["-1", -1]:
output += """<b><span class="info">Please fill in name and value.</span></b>"""
body = [output]
output = "<br />" + addadminbox(subtitle, body)
return perform_modifyfield(colID, fldID=fldID, ln=ln, content=output)
def perform_removefield(colID, ln, fldID='', fldvID='', fmeth='', callback='yes', confirm=0):
"""form to remove a field from a collection.
colID - the current collection, remove the field from this collection.
sel_ln - remove the field with this language
fldID - remove the field with this id"""
if fmeth == "soo":
field = "sort option"
elif fmeth == "sew":
field = "search field"
elif fmeth == "seo":
field = "search option"
else:
field = "field"
subtitle = """<a name="6.4"><a name="7.4"><a name="8.4"></a>Remove %s""" % field
output = ""
col_dict = dict(get_def_name('', "collection"))
fld_dict = dict(get_def_name('', "field"))
res = get_fld_value()
fldv_dict = dict(map(lambda x: (x[0], x[1]), res))
if colID and fldID:
colID = int(colID)
fldID = int(fldID)
if fldvID and fldvID != "None":
fldvID = int(fldvID)
if confirm in ["0", 0]:
text = """Do you want to remove the %s '%s' %s from the collection '%s'.""" % (field, fld_dict[fldID], (fldvID not in["", "None"] and "with value '%s'" % fldv_dict[fldvID] or ''), col_dict[colID])
output += createhiddenform(action="removefield#6.5",
text=text,
button="Confirm",
colID=colID,
fldID=fldID,
fldvID=fldvID,
fmeth=fmeth,
confirm=1)
elif confirm in ["1", 1]:
res = remove_fld(colID, fldID, fldvID)
output += write_outcome(res)
body = [output]
output = "<br />" + addadminbox(subtitle, body)
if fmeth == "soo":
return perform_showsortoptions(colID, ln, content=output)
elif fmeth == "sew":
return perform_showsearchfields(colID, ln, content=output)
elif fmeth == "seo":
return perform_showsearchoptions(colID, ln, content=output)
def perform_removefieldvalue(colID, ln, fldID='', fldvID='', fmeth='', callback='yes', confirm=0):
"""form to remove a field from a collection.
colID - the current collection, remove the field from this collection.
sel_ln - remove the field with this language
fldID - remove the field with this id"""
subtitle = """<a name="7.4"></a>Remove value"""
output = ""
col_dict = dict(get_def_name('', "collection"))
fld_dict = dict(get_def_name('', "field"))
res = get_fld_value()
fldv_dict = dict(map(lambda x: (x[0], x[1]), res))
if colID and fldID:
colID = int(colID)
fldID = int(fldID)
if fldvID and fldvID != "None":
fldvID = int(fldvID)
if confirm in ["0", 0]:
text = """Do you want to remove the value '%s' from the search option '%s'.""" % (fldv_dict[fldvID], fld_dict[fldID])
output += createhiddenform(action="removefieldvalue#7.4",
text=text,
button="Confirm",
colID=colID,
fldID=fldID,
fldvID=fldvID,
fmeth=fmeth,
confirm=1)
elif confirm in ["1", 1]:
res = remove_fld(colID, fldID, fldvID)
output += write_outcome(res)
body = [output]
output = "<br />" + addadminbox(subtitle, body)
return perform_modifyfield(colID, fldID=fldID, ln=ln, content=output)
def perform_rearrangefieldvalue(colID, fldID, ln, callback='yes', confirm=-1):
"""rearrang the fieldvalues alphabetically
colID - the collection
fldID - the field to rearrange the fieldvalue for
"""
subtitle = "Order values alphabetically"
output = ""
col_fldv = get_col_fld(colID, 'seo', fldID)
col_fldv = dict(map(lambda x: (x[1], x[0]), col_fldv))
fldv_names = get_fld_value()
fldv_names = map(lambda x: (x[0], x[1]), fldv_names)
if not col_fldv.has_key(None):
vscore = len(col_fldv)
for (fldvID, name) in fldv_names:
if col_fldv.has_key(fldvID):
run_sql("UPDATE collection_field_fieldvalue SET score_fieldvalue=%s WHERE id_collection=%s and id_field=%s and id_fieldvalue=%s", (vscore, colID, fldID, fldvID))
vscore -= 1
output += write_outcome((1, ""))
else:
output += write_outcome((0, (0, "No values to order")))
body = [output]
output = "<br />" + addadminbox(subtitle, body)
return perform_modifyfield(colID, fldID, ln, content=output)
def perform_rearrangefield(colID, ln, fmeth, callback='yes', confirm=-1):
"""rearrang the fields alphabetically
colID - the collection
"""
subtitle = "Order fields alphabetically"
output = ""
col_fld = dict(map(lambda x: (x[0], x[1]), get_col_fld(colID, fmeth)))
fld_names = get_def_name('', "field")
if len(col_fld) > 0:
score = len(col_fld)
for (fldID, name) in fld_names:
if col_fld.has_key(fldID):
run_sql("UPDATE collection_field_fieldvalue SET score=%s WHERE id_collection=%s and id_field=%s", (score, colID, fldID))
score -= 1
output += write_outcome((1, ""))
else:
output += write_outcome((0, (0, "No fields to order")))
body = [output]
output = "<br />" + addadminbox(subtitle, body)
if fmeth == "soo":
return perform_showsortoptions(colID, ln, content=output)
elif fmeth == "sew":
return perform_showsearchfields(colID, ln, content=output)
elif fmeth == "seo":
return perform_showsearchoptions(colID, ln, content=output)
def perform_addexistingfieldvalue(colID, fldID, fldvID=-1, ln=CFG_SITE_LANG, callback='yes', confirm=-1):
"""form to add an existing fieldvalue to a field.
colID - the collection
fldID - the field to add the fieldvalue to
fldvID - the fieldvalue to add"""
subtitle = """</a><a name="7.4"></a>Add existing value to search option"""
output = ""
if fldvID not in [-1, "-1"] and confirm in [1, "1"]:
fldvID = int(fldvID)
ares = add_col_fld(colID, fldID, 'seo', fldvID)
colID = int(colID)
fldID = int(fldID)
lang = dict(get_languages())
res = get_def_name('', "field")
col_dict = dict(get_def_name('', "collection"))
fld_dict = dict(res)
col_fld = dict(map(lambda x: (x[0], x[1]), get_col_fld(colID, 'seo')))
fld_value = get_fld_value()
fldv_dict = dict(map(lambda x: (x[0], x[1]), fld_value))
text = """
<span class="adminlabel">Value</span>
<select name="fldvID" class="admin_w200">
<option value="-1">- Select value -</option>
"""
res = run_sql("SELECT id,name,value FROM fieldvalue ORDER BY name")
for (id, name, value) in res:
text += """<option value="%s" %s>%s - %s</option>
""" % (id, id == int(fldvID) and 'selected="selected"' or '', name, value)
text += """</select><br />"""
output += createhiddenform(action="addexistingfieldvalue#7.4",
text=text,
button="Add",
colID=colID,
fldID=fldID,
ln=ln,
confirm=1)
if fldvID not in [-1, "-1"] and confirm in [1, "1"]:
output += write_outcome(ares)
elif confirm in [1, "1"]:
output += """<b><span class="info">Select a value to add and try again.</span></b>"""
body = [output]
output = "<br />" + addadminbox(subtitle, body)
return perform_modifyfield(colID, fldID, ln, content=output)
def perform_addexistingfield(colID, ln, fldID=-1, fldvID=-1, fmeth='', callback='yes', confirm=-1):
"""form to add an existing field to a collection.
colID - the collection to add the field to
fldID - the field to add
sel_ln - the language of the field"""
subtitle = """<a name="6.2"></a><a name="7.2"></a><a name="8.2"></a>Add existing field to collection"""
output = ""
if fldID not in [-1, "-1"] and confirm in [1, "1"]:
fldID = int(fldID)
ares = add_col_fld(colID, fldID, fmeth, fldvID)
colID = int(colID)
lang = dict(get_languages())
res = get_def_name('', "field")
col_dict = dict(get_def_name('', "collection"))
fld_dict = dict(res)
col_fld = dict(map(lambda x: (x[0], x[1]), get_col_fld(colID, fmeth)))
fld_value = get_fld_value()
fldv_dict = dict(map(lambda x: (x[0], x[1]), fld_value))
if fldvID:
fldvID = int(fldvID)
text = """
<span class="adminlabel">Field</span>
<select name="fldID" class="admin_w200">
<option value="-1">- Select field -</option>
"""
for (id, var) in res:
if fmeth == 'seo' or (fmeth != 'seo' and not col_fld.has_key(id)):
text += """<option value="%s" %s>%s</option>
""" % (id, '', fld_dict[id])
text += """</select><br />"""
output += createhiddenform(action="addexistingfield#6.2",
text=text,
button="Add",
colID=colID,
fmeth=fmeth,
ln=ln,
confirm=1)
if fldID not in [-1, "-1"] and confirm in [1, "1"]:
output += write_outcome(ares)
elif fldID in [-1, "-1"] and confirm not in [-1, "-1"]:
output += """<b><span class="info">Select a field.</span></b>
"""
body = [output]
output = "<br />" + addadminbox(subtitle, body)
if fmeth == "soo":
return perform_showsortoptions(colID, ln, content=output)
elif fmeth == "sew":
return perform_showsearchfields(colID, ln, content=output)
elif fmeth == "seo":
return perform_showsearchoptions(colID, ln, content=output)
def perform_showsortoptions(colID, ln, callback='yes', content='', confirm=-1):
"""show the sort fields of this collection.."""
colID = int(colID)
col_dict = dict(get_def_name('', "collection"))
fld_dict = dict(get_def_name('', "field"))
fld_type = get_sort_nametypes()
subtitle = """<a name="8">8. Modify sort options for collection '%s'</a> <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.8">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
output = """<dl>
<dt>Field actions (not related to this collection)</dt>
<dd>Go to the BibIndex interface to modify the available sort options</dd>
<dt>Collection specific actions
<dd><a href="addexistingfield?colID=%s&ln=%s&fmeth=soo#8.2">Add sort option to collection</a></dd>
<dd><a href="rearrangefield?colID=%s&ln=%s&fmeth=soo#8.2">Order sort options alphabetically</a></dd>
</dl>
""" % (colID, ln, colID, ln)
header = ['', 'Sort option', 'Actions']
actions = []
sitelangs = get_languages()
lang = dict(sitelangs)
fld_type_list = fld_type.items()
if len(get_col_fld(colID, 'soo')) > 0:
res = get_col_fld(colID, 'soo')
i = 0
for (fldID, fldvID, stype, score, score_fieldvalue) in res:
move = """<table cellspacing="1" cellpadding="0" border="0"><tr><td>"""
if i != 0:
move += """<a href="%s/admin/websearch/websearchadmin.py/switchfldscore?colID=%s&ln=%s&id_1=%s&id_2=%s&fmeth=soo&rand=%s#8"><img border="0" src="%s/img/smallup.gif" title="Move up"></a>""" % (CFG_SITE_URL, colID, ln, fldID, res[i - 1][0], random.randint(0, 1000), CFG_SITE_URL)
else:
move += " "
move += "</td><td>"
i += 1
if i != len(res):
move += """<a href="%s/admin/websearch/websearchadmin.py/switchfldscore?colID=%s&ln=%s&id_1=%s&id_2=%s&fmeth=soo&rand=%s#8"><img border="0" src="%s/img/smalldown.gif" title="Move down"></a>""" % (CFG_SITE_URL, colID, ln, fldID, res[i][0], random.randint(0, 1000), CFG_SITE_URL)
move += """</td></tr></table>"""
actions.append([move, fld_dict[int(fldID)]])
for col in [(('Remove sort option', 'removefield'),)]:
actions[-1].append('<a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fldID=%s&fmeth=soo#8.4">%s</a>' % (CFG_SITE_URL, col[0][1], colID, ln, fldID, col[0][0]))
for (str, function) in col[1:]:
actions[-1][-1] += ' / <a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fldID=%s&fmeth=soo#8.5">%s</a>' % (CFG_SITE_URL, function, colID, ln, fldID, str)
output += tupletotable(header=header, tuple=actions)
else:
output += """No sort options exists for this collection"""
output += content
body = [output]
if callback:
return perform_editcollection(colID, ln, "perform_showsortoptions", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_showsearchfields(colID, ln, callback='yes', content='', confirm=-1):
"""show the search fields of this collection.."""
colID = int(colID)
col_dict = dict(get_def_name('', "collection"))
fld_dict = dict(get_def_name('', "field"))
fld_type = get_sort_nametypes()
subtitle = """<a name="6">6. Modify search fields for collection '%s'</a> <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.6">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
output = """<dl>
<dt>Field actions (not related to this collection)</dt>
<dd>Go to the BibIndex interface to modify the available search fields</dd>
<dt>Collection specific actions
<dd><a href="addexistingfield?colID=%s&ln=%s&fmeth=sew#6.2">Add search field to collection</a></dd>
<dd><a href="rearrangefield?colID=%s&ln=%s&fmeth=sew#6.2">Order search fields alphabetically</a></dd>
</dl>
""" % (colID, ln, colID, ln)
header = ['', 'Search field', 'Actions']
actions = []
sitelangs = get_languages()
lang = dict(sitelangs)
fld_type_list = fld_type.items()
if len(get_col_fld(colID, 'sew')) > 0:
res = get_col_fld(colID, 'sew')
i = 0
for (fldID, fldvID, stype, score, score_fieldvalue) in res:
move = """<table cellspacing="1" cellpadding="0" border="0"><tr><td>"""
if i != 0:
move += """<a href="%s/admin/websearch/websearchadmin.py/switchfldscore?colID=%s&ln=%s&id_1=%s&id_2=%s&fmeth=sew&rand=%s#6"><img border="0" src="%s/img/smallup.gif" title="Move up"></a>""" % (CFG_SITE_URL, colID, ln, fldID, res[i - 1][0], random.randint(0, 1000), CFG_SITE_URL)
else:
move += " "
move += "</td><td>"
i += 1
if i != len(res):
move += '<a href="%s/admin/websearch/websearchadmin.py/switchfldscore?colID=%s&ln=%s&id_1=%s&id_2=%s&fmeth=sew&rand=%s#6"><img border="0" src="%s/img/smalldown.gif" title="Move down"></a>' % (CFG_SITE_URL, colID, ln, fldID, res[i][0], random.randint(0, 1000), CFG_SITE_URL)
move += """</td></tr></table>"""
actions.append([move, fld_dict[int(fldID)]])
for col in [(('Remove search field', 'removefield'),)]:
actions[-1].append('<a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fldID=%s&fmeth=sew#6.4">%s</a>' % (CFG_SITE_URL, col[0][1], colID, ln, fldID, col[0][0]))
for (str, function) in col[1:]:
actions[-1][-1] += ' / <a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fldID=%s#6.5">%s</a>' % (CFG_SITE_URL, function, colID, ln, fldID, str)
output += tupletotable(header=header, tuple=actions)
else:
output += """No search fields exists for this collection"""
output += content
body = [output]
if callback:
return perform_editcollection(colID, ln, "perform_showsearchfields", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_showsearchoptions(colID, ln, callback='yes', content='', confirm=-1):
"""show the sort and search options of this collection.."""
colID = int(colID)
col_dict = dict(get_def_name('', "collection"))
fld_dict = dict(get_def_name('', "field"))
fld_type = get_sort_nametypes()
subtitle = """<a name="7">7. Modify search options for collection '%s'</a> <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.7">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
output = """<dl>
<dt>Field actions (not related to this collection)</dt>
<dd>Go to the BibIndex interface to modify the available search options</dd>
<dt>Collection specific actions
<dd><a href="addexistingfield?colID=%s&ln=%s&fmeth=seo#7.2">Add search option to collection</a></dd>
<dd><a href="rearrangefield?colID=%s&ln=%s&fmeth=seo#7.2">Order search options alphabetically</a></dd>
</dl>
""" % (colID, ln, colID, ln)
header = ['', 'Search option', 'Actions']
actions = []
sitelangs = get_languages()
lang = dict(sitelangs)
fld_type_list = fld_type.items()
fld_distinct = run_sql("SELECT distinct(id_field) FROM collection_field_fieldvalue WHERE type='seo' AND id_collection=%s ORDER by score desc", (colID, ))
if len(fld_distinct) > 0:
i = 0
for (id) in fld_distinct:
fldID = id[0]
col_fld = get_col_fld(colID, 'seo', fldID)
move = ""
if i != 0:
move += """<a href="%s/admin/websearch/websearchadmin.py/switchfldscore?colID=%s&ln=%s&id_1=%s&id_2=%s&fmeth=seo&rand=%s#7"><img border="0" src="%s/img/smallup.gif" title="Move up"></a>""" % (CFG_SITE_URL, colID, ln, fldID, fld_distinct[i - 1][0], random.randint(0, 1000), CFG_SITE_URL)
else:
move += " "
i += 1
if i != len(fld_distinct):
move += '<a href="%s/admin/websearch/websearchadmin.py/switchfldscore?colID=%s&ln=%s&id_1=%s&id_2=%s&fmeth=seo&rand=%s#7"><img border="0" src="%s/img/smalldown.gif" title="Move down"></a>' % (CFG_SITE_URL, colID, ln, fldID, fld_distinct[i][0], random.randint(0, 1000), CFG_SITE_URL)
actions.append([move, "%s" % fld_dict[fldID]])
for col in [(('Modify values', 'modifyfield'), ('Remove search option', 'removefield'),)]:
actions[-1].append('<a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fldID=%s#7.3">%s</a>' % (CFG_SITE_URL, col[0][1], colID, ln, fldID, col[0][0]))
for (str, function) in col[1:]:
actions[-1][-1] += ' / <a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fldID=%s&fmeth=seo#7.3">%s</a>' % (CFG_SITE_URL, function, colID, ln, fldID, str)
output += tupletotable(header=header, tuple=actions)
else:
output += """No search options exists for this collection"""
output += content
body = [output]
if callback:
return perform_editcollection(colID, ln, "perform_showsearchoptions", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_modifyfield(colID, fldID, fldvID='', ln=CFG_SITE_LANG, content='', callback='yes', confirm=0):
"""Modify the fieldvalues for a field"""
colID = int(colID)
col_dict = dict(get_def_name('', "collection"))
fld_dict = dict(get_def_name('', "field"))
fld_type = get_sort_nametypes()
fldID = int(fldID)
subtitle = """<a name="7.3">Modify values for field '%s'</a>""" % (fld_dict[fldID])
output = """<dl>
<dt>Value specific actions
<dd><a href="addexistingfieldvalue?colID=%s&ln=%s&fldID=%s#7.4">Add existing value to search option</a></dd>
<dd><a href="addnewfieldvalue?colID=%s&ln=%s&fldID=%s#7.4">Add new value to search option</a></dd>
<dd><a href="rearrangefieldvalue?colID=%s&ln=%s&fldID=%s#7.4">Order values alphabetically</a></dd>
</dl>
""" % (colID, ln, fldID, colID, ln, fldID, colID, ln, fldID)
header = ['', 'Value name', 'Actions']
actions = []
sitelangs = get_languages()
lang = dict(sitelangs)
fld_type_list = fld_type.items()
col_fld = list(get_col_fld(colID, 'seo', fldID))
if len(col_fld) == 1 and col_fld[0][1] is None:
output += """<b><span class="info">No values added for this search option yet</span></b>"""
else:
j = 0
for (fldID, fldvID, stype, score, score_fieldvalue) in col_fld:
fieldvalue = get_fld_value(fldvID)
move = ""
if j != 0:
move += """<a href="%s/admin/websearch/websearchadmin.py/switchfldvaluescore?colID=%s&ln=%s&id_1=%s&id_fldvalue_1=%s&id_fldvalue_2=%s&rand=%s#7.3"><img border="0" src="%s/img/smallup.gif" title="Move up"></a>""" % (CFG_SITE_URL, colID, ln, fldID, fldvID, col_fld[j - 1][1], random.randint(0, 1000), CFG_SITE_URL)
else:
move += " "
j += 1
if j != len(col_fld):
move += """<a href="%s/admin/websearch/websearchadmin.py/switchfldvaluescore?colID=%s&ln=%s&id_1=%s&id_fldvalue_1=%s&id_fldvalue_2=%s&rand=%s#7.3"><img border="0" src="%s/img/smalldown.gif" title="Move down"></a>""" % (CFG_SITE_URL, colID, ln, fldID, fldvID, col_fld[j][1], random.randint(0, 1000), CFG_SITE_URL)
if fieldvalue[0][1] != fieldvalue[0][2] and fldvID is not None:
actions.append([move, "%s - %s" % (fieldvalue[0][1], fieldvalue[0][2])])
elif fldvID is not None:
actions.append([move, "%s" % fieldvalue[0][1]])
move = ''
for col in [(('Modify value', 'modifyfieldvalue'), ('Remove value', 'removefieldvalue'),)]:
actions[-1].append('<a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fldID=%s&fldvID=%s&fmeth=seo#7.4">%s</a>' % (CFG_SITE_URL, col[0][1], colID, ln, fldID, fldvID, col[0][0]))
for (str, function) in col[1:]:
actions[-1][-1] += ' / <a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fldID=%s&fldvID=%s#7.4">%s</a>' % (CFG_SITE_URL, function, colID, ln, fldID, fldvID, str)
output += tupletotable(header=header, tuple=actions)
output += content
body = [output]
output = "<br />" + addadminbox(subtitle, body)
if len(col_fld) == 0:
output = content
return perform_showsearchoptions(colID, ln, content=output)
def perform_showoutputformats(colID, ln, callback='yes', content='', confirm=-1):
"""shows the outputformats of the current collection
colID - the collection id."""
colID = int(colID)
col_dict = dict(get_def_name('', "collection"))
subtitle = """<a name="10">10. Modify output formats for collection '%s'</a> <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.10">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
output = """
<dl>
<dt>Output format actions (not specific to the chosen collection)
<dd>Go to the BibFormat interface to modify</dd>
<dt>Collection specific actions
<dd><a href="addexistingoutputformat?colID=%s&ln=%s#10.2">Add existing output format to collection</a></dd>
</dl>
""" % (colID, ln)
header = ['', 'Code', 'Output format', 'Actions']
actions = []
col_fmt = get_col_fmt(colID)
fmt_dict = dict(get_def_name('', "format"))
i = 0
if len(col_fmt) > 0:
for (id_format, colID_fld, code, score) in col_fmt:
move = """<table cellspacing="1" cellpadding="0" border="0"><tr><td>"""
if i != 0:
move += """<a href="%s/admin/websearch/websearchadmin.py/switchfmtscore?colID=%s&ln=%s&type=format&id_1=%s&id_2=%s&rand=%s#10"><img border="0" src="%s/img/smallup.gif" title="Move format up"></a>""" % (CFG_SITE_URL, colID, ln, id_format, col_fmt[i - 1][0], random.randint(0, 1000), CFG_SITE_URL)
else:
move += " "
move += "</td><td>"
i += 1
if i != len(col_fmt):
move += '<a href="%s/admin/websearch/websearchadmin.py/switchfmtscore?colID=%s&ln=%s&type=format&id_1=%s&id_2=%s&rand=%s#10"><img border="0" src="%s/img/smalldown.gif" title="Move format down"></a>' % (CFG_SITE_URL, colID, ln, id_format, col_fmt[i][0], random.randint(0, 1000), CFG_SITE_URL)
move += """</td></tr></table>"""
actions.append([move, code, fmt_dict[int(id_format)]])
for col in [(('Remove', 'removeoutputformat'),)]:
actions[-1].append('<a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fmtID=%s#10">%s</a>' % (CFG_SITE_URL, col[0][1], colID, ln, id_format, col[0][0]))
for (str, function) in col[1:]:
actions[-1][-1] += ' / <a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fmtID=%s#10">%s</a>' % (CFG_SITE_URL, function, colID, ln, id_format, str)
output += tupletotable(header=header, tuple=actions)
else:
output += """No output formats exists for this collection"""
output += content
body = [output]
if callback:
return perform_editcollection(colID, ln, "perform_showoutputformats", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def external_collections_build_select(colID, external_collection):
output = '<select name="state" class="admin_w200">'
if external_collection.parser:
max_state = 4
else:
max_state = 2
num_selected = external_collection_get_state(external_collection, colID)
for num in range(max_state):
state_name = CFG_EXTERNAL_COLLECTION_STATES_NAME[num]
if num == num_selected:
selected = ' selected'
else:
selected = ''
output += '<option value="%(num)d"%(selected)s>%(state_name)s</option>' % {'num': num, 'selected': selected, 'state_name': state_name}
output += '</select>\n'
return output
def perform_manage_external_collections(colID, ln, callback='yes', content='', confirm=-1):
"""Show the interface to configure external collections to the user."""
colID = int(colID)
subtitle = """<a name="11">11. Configuration of related external collections</a>
<small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.11">?</a>]</small>""" % CFG_SITE_URL
output = '<form action="update_external_collections" method="POST"><input type="hidden" name="colID" value="%(colID)d">' % {'colID': colID}
table_header = ['External collection', 'Mode', 'Apply also to daughter collections?']
table_content = []
external_collections = external_collection_sort_engine_by_name(external_collections_dictionary.values())
for external_collection in external_collections:
collection_name = external_collection.name
select = external_collections_build_select(colID, external_collection)
recurse = '<input type=checkbox name="recurse" value="%(collection_name)s">' % {'collection_name': collection_name}
table_content.append([collection_name, select, recurse])
output += tupletotable(header=table_header, tuple=table_content)
output += '<input class="adminbutton" type="submit" value="Modify"/>'
output += '</form>'
return addadminbox(subtitle, [output])
def perform_update_external_collections(colID, ln, state_list, recurse_list):
colID = int(colID)
changes = []
output = ""
if not state_list:
return 'Warning : No state found.<br />' + perform_manage_external_collections(colID, ln)
external_collections = external_collection_sort_engine_by_name(external_collections_dictionary.values())
if len(external_collections) != len(state_list):
return 'Warning : Size of state_list different from external_collections!<br />' + perform_manage_external_collections(colID, ln)
for (external_collection, state) in zip(external_collections, state_list):
state = int(state)
collection_name = external_collection.name
recurse = recurse_list and collection_name in recurse_list
oldstate = external_collection_get_state(external_collection, colID)
if oldstate != state or recurse:
changes += external_collection_get_update_state_list(external_collection, colID, state, recurse)
external_collection_apply_changes(changes)
return output + '<br /><br />' + perform_manage_external_collections(colID, ln)
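# Illustrative note (not part of the original module): perform_update_external_collections()
# expects state_list to hold one state value per external collection, in the same order
# produced by external_collection_sort_engine_by_name(), and recurse_list to hold the names
# of the collections whose daughter collections should be updated as well. A hedged sketch
# of a call, with placeholder values:
#
# state_list = ['0', '2', '1'] # one state per sorted external collection
# recurse_list = ['Amazon'] # also apply this collection's change to its daughters
# perform_update_external_collections(colID=1, ln='en', state_list=state_list, recurse_list=recurse_list)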
def perform_showdetailedrecordoptions(colID, ln, callback='yes', content='', confirm=-1):
"""Show the interface to configure detailed record page to the user."""
colID = int(colID)
subtitle = """<a name="12">12. Configuration of detailed record page</a>
<small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.12">?</a>]</small>""" % CFG_SITE_URL
output = '''<form action="update_detailed_record_options" method="post">
<table><tr><td>
<input type="hidden" name="colID" value="%(colID)d">
<dl>
<dt><b>Show tabs:</b></dt>
<dd>
''' % {'colID': colID}
for (tab_id, tab_info) in get_detailed_page_tabs(colID).iteritems():
if tab_id == 'comments' and \
not CFG_WEBCOMMENT_ALLOW_REVIEWS and \
not CFG_WEBCOMMENT_ALLOW_COMMENTS:
continue
check = ''
output += '''<input type="checkbox" id="id%(tabid)s" name="tabs" value="%(tabid)s" %(check)s />
<label for="id%(tabid)s"> %(label)s</label><br />
''' % {'tabid':tab_id,
'check':((tab_info['visible'] and 'checked="checked"') or ''),
'label':tab_info['label']}
output += '</dd></dl></td><td>'
output += '</td></tr></table><input class="adminbutton" type="submit" value="Modify"/>'
output += '''<input type="checkbox" id="recurse" name="recurse" value="1" />
<label for="recurse"> Also apply to subcollections</label>'''
output += '</form>'
return addadminbox(subtitle, [output])
def perform_update_detailed_record_options(colID, ln, tabs, recurse):
"""Update the preferences for the tab to show/hide in the detailed record page."""
colID = int(colID)
changes = []
output = '<b><span class="info">Operation successfully completed.</span></b>'
if '' in tabs:
tabs.remove('')
tabs.append('metadata')
def update_settings(colID, tabs, recurse):
run_sql("DELETE FROM collectiondetailedrecordpagetabs WHERE id_collection=%s", (colID, ))
run_sql("REPLACE INTO collectiondetailedrecordpagetabs" + \
" SET id_collection=%s, tabs=%s", (colID, ';'.join(tabs)))
## for enabled_tab in tabs:
## run_sql("REPLACE INTO collectiondetailedrecordpagetabs" + \
## " SET id_collection='%s', tabs='%s'" % (colID, ';'.join(tabs)))
if recurse:
for descendant_id in get_collection_descendants(colID):
update_settings(descendant_id, tabs, recurse)
update_settings(colID, tabs, recurse)
## for colID in colIDs:
## run_sql("DELETE FROM collectiondetailedrecordpagetabs WHERE id_collection='%s'" % colID)
## for enabled_tab in tabs:
## run_sql("REPLACE INTO collectiondetailedrecordpagetabs" + \
## " SET id_collection='%s', tabs='%s'" % (colID, ';'.join(tabs)))
#if callback:
return perform_editcollection(colID, ln, "perform_modifytranslations",
'<br /><br />' + output + '<br /><br />' + \
perform_showdetailedrecordoptions(colID, ln))
#else:
# return addadminbox(subtitle, body)
#return output + '<br /><br />' + perform_showdetailedrecordoptions(colID, ln)
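# Illustrative note (not part of the original module): the enabled tabs end up stored as a
# single semicolon-joined string per collection in the collectiondetailedrecordpagetabs
# table, e.g. a row holding id_collection=5 and tabs="files;comments;metadata" (the values
# here are placeholders).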
def perform_addexistingoutputformat(colID, ln, fmtID=-1, callback='yes', confirm=-1):
"""form to add an existing output format to a collection.
colID - the collection the format should be added to
fmtID - the format to add."""
subtitle = """<a name="10.2"></a>Add existing output format to collection"""
output = ""
if fmtID not in [-1, "-1"] and confirm in [1, "1"]:
ares = add_col_fmt(colID, fmtID)
colID = int(colID)
res = get_def_name('', "format")
fmt_dict = dict(res)
col_dict = dict(get_def_name('', "collection"))
col_fmt = get_col_fmt(colID)
col_fmt = dict(map(lambda x: (x[0], x[2]), col_fmt))
if len(res) > 0:
text = """
<span class="adminlabel">Output format</span>
<select name="fmtID" class="admin_w200">
<option value="-1">- Select output format -</option>
"""
for (id, name) in res:
if not col_fmt.has_key(id):
text += """<option value="%s" %s>%s</option>
""" % (id, id == int(fmtID) and 'selected="selected"' or '', name)
text += """</select><br />
"""
output += createhiddenform(action="addexistingoutputformat#10.2",
text=text,
button="Add",
colID=colID,
ln=ln,
confirm=1)
else:
output = """No existing output formats to add, please create a new one."""
if fmtID not in [-1, "-1"] and confirm in [1, "1"]:
output += write_outcome(ares)
elif fmtID in [-1, "-1"] and confirm not in [-1, "-1"]:
output += """<b><span class="info">Please select output format.</span></b>"""
body = [output]
output = "<br />" + addadminbox(subtitle, body)
return perform_showoutputformats(colID, ln, content=output)
def perform_deleteoutputformat(colID, ln, fmtID=-1, callback='yes', confirm=-1):
"""form to delete an output format not in use.
colID - the collection id of the current collection.
fmtID - the format id to delete."""
subtitle = """<a name="10.3"></a>Delete an unused output format"""
output = """
<dl>
<dd>Deleting an output format will also delete the associated translations.</dd>
</dl>
"""
colID = int(colID)
if fmtID not in [-1, "-1"] and confirm in [1, "1"]:
fmt_dict = dict(get_def_name('', "format"))
old_colNAME = fmt_dict[int(fmtID)]
ares = delete_fmt(int(fmtID))
res = get_def_name('', "format")
fmt_dict = dict(res)
col_dict = dict(get_def_name('', "collection"))
col_fmt = get_col_fmt()
col_fmt = dict(map(lambda x: (x[0], x[2]), col_fmt))
if len(res) > 0:
text = """
<span class="adminlabel">Output format</span>
<select name="fmtID" class="admin_w200">
"""
text += """<option value="-1">- Select output format -"""
for (id, name) in res:
if not col_fmt.has_key(id):
text += """<option value="%s" %s>%s""" % (id, id == int(fmtID) and 'selected="selected"' or '', name)
text += "</option>"
text += """</select><br />"""
output += createhiddenform(action="deleteoutputformat#10.3",
text=text,
button="Delete",
colID=colID,
ln=ln,
confirm=0)
if fmtID not in [-1, "-1"]:
fmtID = int(fmtID)
if confirm in [0, "0"]:
text = """<b>Do you want to delete the output format '%s'.</b>
""" % fmt_dict[fmtID]
output += createhiddenform(action="deleteoutputformat#10.3",
text=text,
button="Confirm",
colID=colID,
fmtID=fmtID,
ln=ln,
confirm=1)
elif confirm in [1, "1"]:
output += write_outcome(ares)
elif confirm not in [-1, "-1"]:
output += """<b><span class="info">Choose a output format to delete.</span></b>
"""
body = [output]
output = "<br />" + addadminbox(subtitle, body)
return perform_showoutputformats(colID, ln, content=output)
def perform_removeoutputformat(colID, ln, fmtID='', callback='yes', confirm=0):
"""form to remove an output format from a collection.
colID - the collection id of the current collection.
fmtID - the format id.
"""
subtitle = """<a name="10.5"></a>Remove output format"""
output = ""
col_dict = dict(get_def_name('', "collection"))
fmt_dict = dict(get_def_name('', "format"))
if colID and fmtID:
colID = int(colID)
fmtID = int(fmtID)
if confirm in ["0", 0]:
text = """Do you want to remove the output format '%s' from the collection '%s'.""" % (fmt_dict[fmtID], col_dict[colID])
output += createhiddenform(action="removeoutputformat#10.5",
text=text,
button="Confirm",
colID=colID,
fmtID=fmtID,
confirm=1)
elif confirm in ["1", 1]:
res = remove_fmt(colID, fmtID)
output += write_outcome(res)
body = [output]
output = "<br />" + addadminbox(subtitle, body)
return perform_showoutputformats(colID, ln, content=output)
def perform_index(colID=1, ln=CFG_SITE_LANG, mtype='', content='', confirm=0):
"""The index method, calling methods to show the collection tree, create new collections and add collections to tree.
"""
subtitle = "Overview"
colID = int(colID)
col_dict = dict(get_def_name('', "collection"))
output = ""
fin_output = ""
if not col_dict.has_key(1):
res = add_col(CFG_SITE_NAME, '')
if res:
fin_output += """<b><span class="info">Created root collection.</span></b><br />"""
else:
return "Cannot create root collection, please check database."
if CFG_SITE_NAME != run_sql("SELECT name from collection WHERE id=1")[0][0]:
res = run_sql("update collection set name=%s where id=1", (CFG_SITE_NAME, ))
if res:
fin_output += """<b><span class="info">The name of the root collection has been modified to be the same as the %(sitename)s installation name given prior to installing %(sitename)s.</span><b><br />""" % {'sitename' : CFG_SITE_NAME}
else:
return "Error renaming root collection."
fin_output += """
<table>
<tr>
<td>0. <small><a href="%s/admin/websearch/websearchadmin.py?colID=%s&ln=%s&mtype=perform_showall">Show all</a></small></td>
<td>1. <small><a href="%s/admin/websearch/websearchadmin.py?colID=%s&ln=%s&mtype=perform_addcollection">Create new collection</a></small></td>
<td>2. <small><a href="%s/admin/websearch/websearchadmin.py?colID=%s&ln=%s&mtype=perform_addcollectiontotree">Attach collection to tree</a></small></td>
<td>3. <small><a href="%s/admin/websearch/websearchadmin.py?colID=%s&ln=%s&mtype=perform_modifycollectiontree">Modify collection tree</a></small></td>
<td>4. <small><a href="%s/admin/websearch/websearchadmin.py?colID=%s&ln=%s&mtype=perform_checkwebcollstatus">Webcoll Status</a></small></td>
</tr><tr>
<td>5. <small><a href="%s/admin/websearch/websearchadmin.py?colID=%s&ln=%s&mtype=perform_checkcollectionstatus">Collection Status</a></small></td>
<td>6. <small><a href="%s/admin/websearch/websearchadmin.py?colID=%s&ln=%s&mtype=perform_checkexternalcollections">Check external collections</a></small></td>
<td>7. <small><a href="%s/help/admin/websearch-admin-guide?ln=%s">Guide</a></small></td>
</tr>
</table>
""" % (CFG_SITE_URL, colID, ln, CFG_SITE_URL, colID, ln, CFG_SITE_URL, colID, ln, CFG_SITE_URL, colID, ln, CFG_SITE_URL, colID, ln, CFG_SITE_URL, colID, ln, CFG_SITE_URL, colID, ln, CFG_SITE_URL, ln)
if mtype == "":
fin_output += """<br /><br /><b><span class="info">To manage the collections, select an item from the menu.</span><b><br />"""
if mtype == "perform_addcollection" and content:
fin_output += content
elif mtype == "perform_addcollection" or mtype == "perform_showall":
fin_output += perform_addcollection(colID=colID, ln=ln, callback='')
fin_output += "<br />"
if mtype == "perform_addcollectiontotree" and content:
fin_output += content
elif mtype == "perform_addcollectiontotree" or mtype == "perform_showall":
fin_output += perform_addcollectiontotree(colID=colID, ln=ln, callback='')
fin_output += "<br />"
if mtype == "perform_modifycollectiontree" and content:
fin_output += content
elif mtype == "perform_modifycollectiontree" or mtype == "perform_showall":
fin_output += perform_modifycollectiontree(colID=colID, ln=ln, callback='')
fin_output += "<br />"
if mtype == "perform_checkwebcollstatus" and content:
fin_output += content
elif mtype == "perform_checkwebcollstatus" or mtype == "perform_showall":
fin_output += perform_checkwebcollstatus(colID, ln, callback='')
if mtype == "perform_checkcollectionstatus" and content:
fin_output += content
elif mtype == "perform_checkcollectionstatus" or mtype == "perform_showall":
fin_output += perform_checkcollectionstatus(colID, ln, callback='')
if mtype == "perform_checkexternalcollections" and content:
fin_output += content
elif mtype == "perform_checkexternalcollections" or mtype == "perform_showall":
fin_output += perform_checkexternalcollections(colID, ln, callback='')
body = [fin_output]
return addadminbox('<b>Menu</b>', body)
def show_coll_not_in_tree(colID, ln, col_dict):
"""Returns collections not in tree"""
tree = get_col_tree(colID)
in_tree = {}
output = "These collections are not in the tree, and should be added:<br />"
for (id, up, down, dad, reltype) in tree:
in_tree[id] = 1
in_tree[dad] = 1
res = run_sql("SELECT id from collection")
if len(res) != len(in_tree):
for id in res:
if not in_tree.has_key(id[0]):
output += """<a href="%s/admin/websearch/websearchadmin.py/editcollection?colID=%s&ln=%s" title="Edit collection">%s</a> ,
""" % (CFG_SITE_URL, id[0], ln, col_dict[id[0]])
output += "<br /><br />"
else:
output = ""
return output
def create_colltree(tree, col_dict, colID, ln, move_from='', move_to='', rtype='', edit=''):
"""Creates the presentation of the collection tree, with the buttons for modifying it.
tree - the tree to present, from get_tree()
col_dict - the name of the collections in a dictionary
colID - the collection id to start with
move_from - if a collection to be moved has been chosen
move_to - the collection which should be set as father of move_from
rtype - the type of the tree, regular or virtual
edit - if the method should output the edit buttons."""
if move_from:
move_from_rtype = move_from[0]
move_from_id = int(move_from[1:len(move_from)])
tree_from = get_col_tree(colID, move_from_rtype)
tree_to = get_col_tree(colID, rtype)
tables = 0
tstack = []
i = 0
text = """
<table border ="0" cellspacing="0" cellpadding="0">"""
for i in range(0, len(tree)):
id_son = tree[i][0]
up = tree[i][1]
down = tree[i][2]
dad = tree[i][3]
reltype = tree[i][4]
tmove_from = ""
j = i
while j > 0:
j = j - 1
try:
if tstack[j][1] == dad:
table = tstack[j][2]
for k in range(0, tables - table):
tables = tables - 1
text += """</table></td></tr>
"""
break
except StandardError, e:
pass
text += """<tr><td>
"""
if i > 0 and tree[i][1] == 0:
tables = tables + 1
text += """</td><td></td><td></td><td></td><td><table border="0" cellspacing="0" cellpadding="0"><tr><td>
"""
if i == 0:
tstack.append((id_son, dad, 1))
else:
tstack.append((id_son, dad, tables))
if up == 1 and edit:
text += """<a href="%s/admin/websearch/websearchadmin.py/modifycollectiontree?colID=%s&ln=%s&move_up=%s&rtype=%s#%s"><img border="0" src="%s/img/smallup.gif" title="Move collection up"></a>""" % (CFG_SITE_URL, colID, ln, i, rtype, tree[i][0], CFG_SITE_URL)
else:
text += """ """
text += "</td><td>"
if down == 1 and edit:
text += """<a href="%s/admin/websearch/websearchadmin.py/modifycollectiontree?colID=%s&ln=%s&move_down=%s&rtype=%s#%s"><img border="0" src="%s/img/smalldown.gif" title="Move collection down"></a>""" % (CFG_SITE_URL, colID, ln, i, rtype, tree[i][0], CFG_SITE_URL)
else:
text += """ """
text += "</td><td>"
if edit:
if move_from and move_to:
tmove_from = move_from
move_from = ''
if not (move_from == "" and i == 0) and not (move_from != "" and int(move_from[1:len(move_from)]) == i and rtype == move_from[0]):
check = "true"
if move_from:
#if tree_from[move_from_id][0] == tree_to[i][0] or not check_col(tree_to[i][0], tree_from[move_from_id][0]):
# check = ''
#elif not check_col(tree_to[i][0], tree_from[move_from_id][0]):
# check = ''
#if not check and (tree_to[i][0] == 1 and tree_from[move_from_id][3] == tree_to[i][0] and move_from_rtype != rtype):
# check = "true"
if check:
text += """<a href="%s/admin/websearch/websearchadmin.py/modifycollectiontree?colID=%s&ln=%s&move_from=%s&move_to=%s%s&rtype=%s#tree"><img border="0" src="%s/img/move_to.gif" title="Move '%s' to '%s'"></a>
""" % (CFG_SITE_URL, colID, ln, move_from, rtype, i, rtype, CFG_SITE_URL, col_dict[tree_from[int(move_from[1:len(move_from)])][0]], col_dict[tree_to[i][0]])
else:
try:
text += """<a href="%s/admin/websearch/websearchadmin.py/modifycollectiontree?colID=%s&ln=%s&move_from=%s%s&rtype=%s#%s"><img border="0" src="%s/img/move_from.gif" title="Move '%s' from this location."></a>""" % (CFG_SITE_URL, colID, ln, rtype, i, rtype, tree[i][0], CFG_SITE_URL, col_dict[tree[i][0]])
except KeyError:
pass
else:
text += """<img border="0" src="%s/img/white_field.gif">
""" % CFG_SITE_URL
else:
text += """<img border="0" src="%s/img/white_field.gif">
""" % CFG_SITE_URL
text += """
</td>
<td>"""
if edit:
try:
text += """<a href="%s/admin/websearch/websearchadmin.py/modifycollectiontree?colID=%s&ln=%s&delete=%s&rtype=%s#%s"><img border="0" src="%s/img/iconcross.gif" title="Remove colletion from tree"></a>""" % (CFG_SITE_URL, colID, ln, i, rtype, tree[i][0], CFG_SITE_URL)
except KeyError:
pass
elif i != 0:
text += """<img border="0" src="%s/img/white_field.gif">
""" % CFG_SITE_URL
text += """</td><td>
"""
if tmove_from:
move_from = tmove_from
try:
text += """<a name="%s"></a>%s<a href="%s/admin/websearch/websearchadmin.py/editcollection?colID=%s&ln=%s" title="Edit collection">%s</a>%s%s%s""" % (tree[i][0], (reltype=="v" and '<i>' or ''), CFG_SITE_URL, tree[i][0], ln, col_dict[id_son], (move_to=="%s%s" %(rtype, i) and ' <img border="0" src="%s/img/move_to.gif">' % CFG_SITE_URL or ''), (move_from=="%s%s" % (rtype, i) and ' <img border="0" src="%s/img/move_from.gif">' % CFG_SITE_URL or ''), (reltype=="v" and '</i>' or ''))
except KeyError:
pass
text += """</td></tr>
"""
while tables > 0:
text += """</table></td></tr>
"""
tables = tables - 1
text += """</table>
"""
return text
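# Illustrative note (not part of the original module): the move_from/move_to arguments of
# create_colltree() are strings combining the relation type with the element's index in the
# tree, e.g. "r2" for element 2 of the regular tree or "v0" for element 0 of the virtual
# tree; the code above splits them as move_from[0] (type) and int(move_from[1:]) (index).
# A hypothetical helper that builds such a token could look like this:
#
# def make_tree_token(rtype, index):
#     """Return a move_from/move_to token, e.g. make_tree_token('r', 2) -> 'r2'."""
#     assert rtype in ('r', 'v')
#     return "%s%d" % (rtype, index)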
def perform_deletecollection(colID, ln, confirm=-1, callback='yes'):
"""form to delete a collection
colID - id of collection
"""
subtitle =''
output = """
<span class="warning">
<strong>
<dl>
<dt>WARNING:</dt>
<dd>When deleting a collection, you also delete all data related to the collection, such as translations, relations to other collections, and information about which rank methods to use.
<br />For more information, please go to the <a title="See guide" href="%s/help/admin/websearch-admin-guide">WebSearch guide</a> and read the section regarding deleting a collection.</dd>
</dl>
</strong>
</span>
""" % CFG_SITE_URL
col_dict = dict(get_def_name('', "collection"))
if colID != 1 and colID and col_dict.has_key(int(colID)):
colID = int(colID)
subtitle = """<a name="4">4. Delete collection '%s'</a> <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.4">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
res = run_sql("SELECT id_dad,id_son,type,score from collection_collection WHERE id_dad=%s", (colID, ))
res2 = run_sql("SELECT id_dad,id_son,type,score from collection_collection WHERE id_son=%s", (colID, ))
if not res and not res2:
if confirm in ["-1", -1]:
text = """Do you want to delete this collection."""
output += createhiddenform(action="deletecollection#4",
text=text,
colID=colID,
button="Delete",
confirm=0)
elif confirm in ["0", 0]:
text = """Are you sure you want to delete this collection."""
output += createhiddenform(action="deletecollection#4",
text=text,
colID=colID,
button="Confirm",
confirm=1)
elif confirm in ["1", 1]:
result = delete_col(colID)
if not result:
raise Exception
else:
output = """<b><span class="info">Can not delete a collection that is a part of the collection tree, remove collection from the tree and try again.</span></b>"""
else:
subtitle = """4. Delete collection"""
output = """<b><span class="info">Not possible to delete the root collection</span></b>"""
body = [output]
if callback:
return perform_editcollection(colID, ln, "perform_deletecollection", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_editcollection(colID=1, ln=CFG_SITE_LANG, mtype='', content=''):
"""interface to modify a collection. this method is calling other methods which again is calling this and sending back the output of the method.
if callback, the method will call perform_editcollection, if not, it will just return its output.
colID - id of the collection
mtype - the method that called this method.
content - the output from that method."""
colID = int(colID)
col_dict = dict(get_def_name('', "collection"))
if not col_dict.has_key(colID):
return """<b><span class="info">Collection deleted.</span></b>
"""
fin_output = """
<table>
<tr>
<td><b>Menu</b></td>
</tr>
<tr>
<td>0. <small><a href="editcollection?colID=%s&ln=%s">Show all</a></small></td>
<td>1. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_modifydbquery">Modify collection query</a></small></td>
<td>2. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_modifyrestricted">Modify access restrictions</a></small></td>
<td>3. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_modifytranslations">Modify translations</a></small></td>
<td>4. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_deletecollection">Delete collection</a></small></td>
</tr><tr>
<td>5. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_showportalboxes">Modify portalboxes</a></small></td>
<td>6. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_showsearchfields#6">Modify search fields</a></small></td>
<td>7. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_showsearchoptions#7">Modify search options</a></small></td>
<td>8. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_showsortoptions#8">Modify sort options</a></small></td>
<td>9. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_modifyrankmethods#9">Modify rank options</a></small></td>
</tr><tr>
<td>10. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_showoutputformats#10">Modify output formats</a></small></td>
<td>11. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_manage_external_collections#11">Configuration of related external collections</a></small></td>
<td>12. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_showdetailedrecordoptions#12">Detailed record page options</a></small></td>
</tr>
</table>
""" % (colID, ln, colID, ln, colID, ln, colID, ln, colID, ln, colID, ln, colID, ln, colID, ln, colID, ln, colID, ln, colID, ln, colID, ln, colID, ln)
if mtype == "perform_modifydbquery" and content:
fin_output += content
elif mtype == "perform_modifydbquery" or not mtype:
fin_output += perform_modifydbquery(colID, ln, callback='')
if mtype == "perform_modifyrestricted" and content:
fin_output += content
elif mtype == "perform_modifyrestricted" or not mtype:
fin_output += perform_modifyrestricted(colID, ln, callback='')
if mtype == "perform_modifytranslations" and content:
fin_output += content
elif mtype == "perform_modifytranslations" or not mtype:
fin_output += perform_modifytranslations(colID, ln, callback='')
if mtype == "perform_deletecollection" and content:
fin_output += content
elif mtype == "perform_deletecollection" or not mtype:
fin_output += perform_deletecollection(colID, ln, callback='')
if mtype == "perform_showportalboxes" and content:
fin_output += content
elif mtype == "perform_showportalboxes" or not mtype:
fin_output += perform_showportalboxes(colID, ln, callback='')
if mtype == "perform_showsearchfields" and content:
fin_output += content
elif mtype == "perform_showsearchfields" or not mtype:
fin_output += perform_showsearchfields(colID, ln, callback='')
if mtype == "perform_showsearchoptions" and content:
fin_output += content
elif mtype == "perform_showsearchoptions" or not mtype:
fin_output += perform_showsearchoptions(colID, ln, callback='')
if mtype == "perform_showsortoptions" and content:
fin_output += content
elif mtype == "perform_showsortoptions" or not mtype:
fin_output += perform_showsortoptions(colID, ln, callback='')
if mtype == "perform_modifyrankmethods" and content:
fin_output += content
elif mtype == "perform_modifyrankmethods" or not mtype:
fin_output += perform_modifyrankmethods(colID, ln, callback='')
if mtype == "perform_showoutputformats" and content:
fin_output += content
elif mtype == "perform_showoutputformats" or not mtype:
fin_output += perform_showoutputformats(colID, ln, callback='')
if mtype == "perform_manage_external_collections" and content:
fin_output += content
elif mtype == "perform_manage_external_collections" or not mtype:
fin_output += perform_manage_external_collections(colID, ln, callback='')
if mtype == "perform_showdetailedrecordoptions" and content:
fin_output += content
elif mtype == "perform_showdetailedrecordoptions" or not mtype:
fin_output += perform_showdetailedrecordoptions(colID, ln, callback='')
return addadminbox("Overview of edit options for collection '%s'" % col_dict[colID], [fin_output])
def perform_checkwebcollstatus(colID, ln, confirm=0, callback='yes'):
"""Check status of the collection tables with respect to the webcoll cache."""
subtitle = """<a name="11"></a>Webcoll Status [<a href="%s/help/admin/websearch-admin-guide#5">?</a>]""" % CFG_SITE_URL
output = ""
colID = int(colID)
col_dict = dict(get_def_name('', "collection"))
output += """<br /><b>Last updates:</b><br />"""
collection_table_update_time = ""
collection_web_update_time = ""
collection_table_update_time = get_table_update_time('collection')
output += "Collection table last updated: %s<br />" % collection_table_update_time
try:
file = open("%s/collections/last_updated" % CFG_CACHEDIR)
collection_web_update_time = file.readline().strip()
output += "Collection cache last updated: %s<br />" % collection_web_update_time
file.close()
except:
pass
# reformat collection_web_update_time to the format suitable for comparisons
try:
collection_web_update_time = time.strftime("%Y-%m-%d %H:%M:%S",
time.strptime(collection_web_update_time, "%d %b %Y %H:%M:%S"))
except ValueError, e:
pass
if collection_table_update_time > collection_web_update_time:
output += """<br /><b><span class="info">Warning: The collections have been modified since last time Webcoll was executed, to process the changes, Webcoll must be executed.</span></b><br />"""
header = ['ID', 'Name', 'Time', 'Status', 'Progress']
actions = []
output += """<br /><b>Last BibSched tasks:</b><br />"""
res = run_sql("select id, proc, host, user, runtime, sleeptime, arguments, status, progress from schTASK where proc='webcoll' and runtime< now() ORDER by runtime")
if len(res) > 0:
(id, proc, host, user, runtime, sleeptime, arguments, status, progress) = res[len(res) - 1]
webcoll__update_time = runtime
actions.append([id, proc, runtime, (status !="" and status or ''), (progress !="" and progress or '')])
else:
actions.append(['', 'webcoll', '', '', 'Not executed yet'])
res = run_sql("select id, proc, host, user, runtime, sleeptime, arguments, status, progress from schTASK where proc='bibindex' and runtime< now() ORDER by runtime")
if len(res) > 0:
(id, proc, host, user, runtime, sleeptime, arguments, status, progress) = res[len(res) - 1]
actions.append([id, proc, runtime, (status !="" and status or ''), (progress !="" and progress or '')])
else:
actions.append(['', 'bibindex', '', '', 'Not executed yet'])
output += tupletotable(header=header, tuple=actions)
output += """<br /><b>Next scheduled BibSched run:</b><br />"""
actions = []
res = run_sql("select id, proc, host, user, runtime, sleeptime, arguments, status, progress from schTASK where proc='webcoll' and runtime > now() ORDER by runtime")
webcoll_future = ""
if len(res) > 0:
(id, proc, host, user, runtime, sleeptime, arguments, status, progress) = res[0]
webcoll__update_time = runtime
actions.append([id, proc, runtime, (status !="" and status or ''), (progress !="" and progress or '')])
webcoll_future = "yes"
else:
actions.append(['', 'webcoll', '', '', 'Not scheduled'])
res = run_sql("select id, proc, host, user, runtime, sleeptime, arguments, status, progress from schTASK where proc='bibindex' and runtime > now() ORDER by runtime")
bibindex_future = ""
if len(res) > 0:
(id, proc, host, user, runtime, sleeptime, arguments, status, progress) = res[0]
actions.append([id, proc, runtime, (status !="" and status or ''), (progress !="" and progress or '')])
bibindex_future = "yes"
else:
actions.append(['', 'bibindex', '', '', 'Not scheduled'])
output += tupletotable(header=header, tuple=actions)
if webcoll_future == "":
output += """<br /><b><span class="info">Warning: Webcoll is not scheduled for a future run by bibsched, any updates to the collection will not be processed.</span></b><br />"""
if bibindex_future == "":
output += """<br /><b><span class="info">Warning: Bibindex is not scheduled for a future run by bibsched, any updates to the records will not be processed.</span></b><br />"""
body = [output]
if callback:
return perform_index(colID, ln, "perform_checkwebcollstatus", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_modifyrestricted(colID, ln, rest='', callback='yes', confirm=-1):
"""modify which apache group is allowed to access the collection.
rest - the groupname"""
subtitle = ''
output = ""
col_dict = dict(get_def_name('', "collection"))
action_id = acc_get_action_id(VIEWRESTRCOLL)
if colID and col_dict.has_key(int(colID)):
colID = int(colID)
subtitle = """<a name="2">2. Modify access restrictions for collection '%s'</a> <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.2">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
output = """<p>Please note that Invenio versions greater than <em>0.92.1</em> manage collection restriction via the standard
<strong><a href="/admin/webaccess/webaccessadmin.py/showactiondetails?id_action=%i">WebAccess Admin Interface</a></strong> (action '%s').</p>
""" % (action_id, VIEWRESTRCOLL)
body = [output]
if callback:
return perform_editcollection(colID, ln, "perform_modifyrestricted", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_checkcollectionstatus(colID, ln, confirm=0, callback='yes'):
"""Check the configuration of the collections."""
from invenio.search_engine import collection_restricted_p, restricted_collection_cache
subtitle = """<a name="11"></a>Collection Status [<a href="%s/help/admin/websearch-admin-guide#6">?</a>]""" % CFG_SITE_URL
output = ""
colID = int(colID)
col_dict = dict(get_def_name('', "collection"))
collections = run_sql("SELECT id, name, dbquery, nbrecs FROM collection "
"ORDER BY id")
header = ['ID', 'Name','Query', 'Subcollections', 'Restricted', 'Hosted',
'I18N', 'Status', 'Number of records']
rnk_list = get_def_name('', "rnkMETHOD")
actions = []
restricted_collection_cache.recreate_cache_if_needed()
for (id, name, dbquery, nbrecs) in collections:
reg_sons = col_has_son(id, 'r')
vir_sons = col_has_son(id, 'v')
status = ""
hosted = ""
if str(dbquery).startswith("hostedcollection:"): hosted = """<b><span class="info">Yes</span></b>"""
else: hosted = """<b><span class="info">No</span></b>"""
langs = run_sql("SELECT ln from collectionname where id_collection=%s", (id, ))
i8n = ""
if len(langs) > 0:
for lang in langs:
i8n += "%s, " % lang
else:
i8n = """<b><span class="info">None</span></b>"""
if reg_sons and dbquery:
status = """<b><span class="warning">1:Conflict</span></b>"""
elif not dbquery and not reg_sons:
status = """<b><span class="warning">2:Empty</span></b>"""
if (reg_sons or vir_sons):
subs = """<b><span class="info">Yes</span></b>"""
else:
subs = """<b><span class="info">No</span></b>"""
if dbquery is None:
dbquery = """<b><span class="info">No</span></b>"""
restricted = collection_restricted_p(name, recreate_cache_if_needed=False)
if restricted:
restricted = """<b><span class="warning">Yes</span></b>"""
if status:
status += """<b><span class="warning">,3:Restricted</span></b>"""
else:
status += """<b><span class="warning">3:Restricted</span></b>"""
else:
restricted = """<b><span class="info">No</span></b>"""
if status == "":
status = """<b><span class="info">OK</span></b>"""
actions.append([id, """<a href="%s/admin/websearch/websearchadmin.py/editcollection?colID=%s&ln=%s">%s</a>""" % (CFG_SITE_URL, id, ln, name), dbquery, subs, restricted, hosted, i8n, status, nbrecs])
output += tupletotable(header=header, tuple=actions)
body = [output]
if callback:
return perform_index(colID, ln, "perform_checkcollectionstatus", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_checkexternalcollections(colID, ln, icl=None, update="", confirm=0, callback='yes'):
"""Check the external collections for inconsistencies."""
subtitle = """<a name="7"></a>Check external collections [<a href="%s/help/admin/websearch-admin-guide#7">?</a>]""" % CFG_SITE_URL
output = ""
colID = int(colID)
if icl:
if update == "add":
# icl : the "inconsistent list" comes as a string, it has to be converted back into a list
icl = eval(icl)
#icl = icl[1:-1].split(',')
for collection in icl:
#collection = str(collection[1:-1])
query_select = "SELECT name FROM externalcollection WHERE name like '%(name)s';" % {'name': collection}
results_select = run_sql(query_select)
if not results_select:
query_insert = "INSERT INTO externalcollection (name) VALUES ('%(name)s');" % {'name': collection}
run_sql(query_insert)
output += """<br /><span class=info>New collection \"%s\" has been added to the database table \"externalcollection\".</span><br />""" % (collection)
else:
output += """<br /><span class=info>Collection \"%s\" has already been added to the database table \"externalcollection\" or was already there.</span><br />""" % (collection)
elif update == "del":
# icl : the "inconsistent list" comes as a string, it has to be converted back into a list
icl = eval(icl)
#icl = icl[1:-1].split(',')
for collection in icl:
#collection = str(collection[1:-1])
query_select = "SELECT id FROM externalcollection WHERE name like '%(name)s';" % {'name': collection}
results_select = run_sql(query_select)
if results_select:
query_delete = "DELETE FROM externalcollection WHERE id like '%(id)s';" % {'id': results_select[0][0]}
query_delete_states = "DELETE FROM collection_externalcollection WHERE id_externalcollection like '%(id)s';" % {'id': results_select[0][0]}
run_sql(query_delete)
run_sql(query_delete_states)
output += """<br /><span class=info>Collection \"%s\" has been deleted from the database table \"externalcollection\".</span><br />""" % (collection)
else:
output += """<br /><span class=info>Collection \"%s\" has already been delete from the database table \"externalcollection\" or was never there.</span><br />""" % (collection)
external_collections_file = []
external_collections_db = []
for coll in external_collections_dictionary.values():
external_collections_file.append(coll.name)
external_collections_file.sort()
query = """SELECT name from externalcollection"""
results = run_sql(query)
for result in results:
external_collections_db.append(result[0])
external_collections_db.sort()
number_file = len(external_collections_file)
number_db = len(external_collections_db)
if external_collections_file == external_collections_db:
output += """<br /><span class="info">External collections are consistent.</span><br /><br />
- database table \"externalcollection\" has %(number_db)s collections<br />
- configuration file \"websearch_external_collections_config.py\" has %(number_file)s collections""" % {
"number_db" : number_db,
"number_file" : number_file}
elif len(external_collections_file) > len(external_collections_db):
external_collections_diff = list(set(external_collections_file) - set(external_collections_db))
external_collections_db.extend(external_collections_diff)
external_collections_db.sort()
if external_collections_file == external_collections_db:
output += """<br /><span class="warning">There is an inconsistency:</span><br /><br />
- database table \"externalcollection\" has %(number_db)s collections
(<span class="warning">missing: %(diff)s</span>)<br />
- configuration file \"websearch_external_collections_config.py\" has %(number_file)s collections
<br /><br /><a href="%(site_url)s/admin/websearch/websearchadmin.py/checkexternalcollections?colID=%(colID)s&icl=%(diff)s&update=add&ln=%(ln)s">
Click here</a> to update your database adding the missing collections. If the problem persists please check your configuration manually.""" % {
"number_db" : number_db,
"number_file" : number_file,
"diff" : external_collections_diff,
"site_url" : CFG_SITE_URL,
"colID" : colID,
"ln" : ln}
else:
output += """<br /><span class="warning">There is an inconsistency:</span><br /><br />
- database table \"externalcollection\" has %(number_db)s collections<br />
- configuration file \"websearch_external_collections_config.py\" has %(number_file)s collections
<br /><br /><span class="warning">The external collections do not match.</span>
<br />To fix the problem please check your configuration manually.""" % {
"number_db" : number_db,
"number_file" : number_file}
elif len(external_collections_file) < len(external_collections_db):
external_collections_diff = list(set(external_collections_db) - set(external_collections_file))
external_collections_file.extend(external_collections_diff)
external_collections_file.sort()
if external_collections_file == external_collections_db:
output += """<br /><span class="warning">There is an inconsistency:</span><br /><br />
- database table \"externalcollection\" has %(number_db)s collections
| (<span class="warning">extra: %(diff)s</span>)<br /> | 11,089 | lcc_e | python | null | 6fbc14fb9561eb8fa28cc69fbce940bc349c14bfe4c279e1 |
|
# -*- coding: utf-8 -*-
#
# pylast - A Python interface to Last.fm (and other API compatible social networks)
#
# Copyright 2008-2010 Amr Hassan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# http://code.google.com/p/pylast/
__version__ = '0.5'
__author__ = 'Amr Hassan'
__copyright__ = "Copyright (C) 2008-2010 Amr Hassan"
__license__ = "apache2"
__email__ = 'amr.hassan@gmail.com'
import hashlib
from xml.dom import minidom
import xml.dom
import time
import shelve
import tempfile
import sys
import collections
import warnings
def _deprecation_warning(message):
warnings.warn(message, DeprecationWarning)
if sys.version_info[0] == 3:
from http.client import HTTPConnection
import html.entities as htmlentitydefs
from urllib.parse import splithost as url_split_host
from urllib.parse import quote_plus as url_quote_plus
unichr = chr
elif sys.version_info[0] == 2:
from httplib import HTTPConnection
import htmlentitydefs
from urllib import splithost as url_split_host
from urllib import quote_plus as url_quote_plus
STATUS_INVALID_SERVICE = 2
STATUS_INVALID_METHOD = 3
STATUS_AUTH_FAILED = 4
STATUS_INVALID_FORMAT = 5
STATUS_INVALID_PARAMS = 6
STATUS_INVALID_RESOURCE = 7
STATUS_TOKEN_ERROR = 8
STATUS_INVALID_SK = 9
STATUS_INVALID_API_KEY = 10
STATUS_OFFLINE = 11
STATUS_SUBSCRIBERS_ONLY = 12
STATUS_INVALID_SIGNATURE = 13
STATUS_TOKEN_UNAUTHORIZED = 14
STATUS_TOKEN_EXPIRED = 15
EVENT_ATTENDING = '0'
EVENT_MAYBE_ATTENDING = '1'
EVENT_NOT_ATTENDING = '2'
PERIOD_OVERALL = 'overall'
PERIOD_7DAYS = "7day"
PERIOD_3MONTHS = '3month'
PERIOD_6MONTHS = '6month'
PERIOD_12MONTHS = '12month'
DOMAIN_ENGLISH = 0
DOMAIN_GERMAN = 1
DOMAIN_SPANISH = 2
DOMAIN_FRENCH = 3
DOMAIN_ITALIAN = 4
DOMAIN_POLISH = 5
DOMAIN_PORTUGUESE = 6
DOMAIN_SWEDISH = 7
DOMAIN_TURKISH = 8
DOMAIN_RUSSIAN = 9
DOMAIN_JAPANESE = 10
DOMAIN_CHINESE = 11
COVER_SMALL = 0
COVER_MEDIUM = 1
COVER_LARGE = 2
COVER_EXTRA_LARGE = 3
COVER_MEGA = 4
IMAGES_ORDER_POPULARITY = "popularity"
IMAGES_ORDER_DATE = "dateadded"
USER_MALE = 'Male'
USER_FEMALE = 'Female'
SCROBBLE_SOURCE_USER = "P"
SCROBBLE_SOURCE_NON_PERSONALIZED_BROADCAST = "R"
SCROBBLE_SOURCE_PERSONALIZED_BROADCAST = "E"
SCROBBLE_SOURCE_LASTFM = "L"
SCROBBLE_SOURCE_UNKNOWN = "U"
SCROBBLE_MODE_PLAYED = ""
SCROBBLE_MODE_LOVED = "L"
SCROBBLE_MODE_BANNED = "B"
SCROBBLE_MODE_SKIPPED = "S"
class _Network(object):
"""
A music social network website that is Last.fm or one exposing a Last.fm compatible API
"""
def __init__(self, name, homepage, ws_server, api_key, api_secret, session_key, submission_server, username, password_hash,
domain_names, urls):
"""
name: the name of the network
homepage: the homepage url
ws_server: the url of the webservices server
api_key: a provided API_KEY
api_secret: a provided API_SECRET
session_key: a generated session_key or None
submission_server: the url of the server to which tracks are submitted (scrobbled)
username: a username of a valid user
password_hash: the output of pylast.md5(password) where password is the user's password
domain_names: a dict mapping each DOMAIN_* value to a string domain name
urls: a dict mapping types to urls
if username and password_hash were provided and not session_key, session_key will be
generated automatically when needed.
Either a valid session_key or a combination of username and password_hash must be present for scrobbling.
You should use a preconfigured network object through a get_*_network(...) method instead of creating an object
of this class, unless you know what you're doing.
"""
self.name = name
self.homepage = homepage
self.ws_server = ws_server
self.api_key = api_key
self.api_secret = api_secret
self.session_key = session_key
self.submission_server = submission_server
self.username = username
self.password_hash = password_hash
self.domain_names = domain_names
self.urls = urls
self.cache_backend = None
self.proxy_enabled = False
self.proxy = None
self.last_call_time = 0
#generate a session_key if necessary
if (self.api_key and self.api_secret) and not self.session_key and (self.username and self.password_hash):
sk_gen = SessionKeyGenerator(self)
self.session_key = sk_gen.get_session_key(self.username, self.password_hash)
"""def __repr__(self):
attributes = ("name", "homepage", "ws_server", "api_key", "api_secret", "session_key", "submission_server",
"username", "password_hash", "domain_names", "urls")
text = "pylast._Network(%s)"
args = []
for attr in attributes:
args.append("=".join((attr, repr(getattr(self, attr)))))
return text % ", ".join(args)
"""
def __str__(self):
return "The %s Network" %self.name
def get_artist(self, artist_name):
"""
Return an Artist object
"""
return Artist(artist_name, self)
def get_track(self, artist, title):
"""
Return a Track object
"""
return Track(artist, title, self)
def get_album(self, artist, title):
"""
Return an Album object
"""
return Album(artist, title, self)
def get_authenticated_user(self):
"""
Returns the authenticated user
"""
return AuthenticatedUser(self)
def get_country(self, country_name):
"""
Returns a country object
"""
return Country(country_name, self)
def get_group(self, name):
"""
Returns a Group object
"""
return Group(name, self)
def get_user(self, username):
"""
Returns a user object
"""
return User(username, self)
def get_tag(self, name):
"""
Returns a tag object
"""
return Tag(name, self)
def get_scrobbler(self, client_id, client_version):
"""
Returns a Scrobbler object used for submitting tracks to the server
Quote from http://www.last.fm/api/submissions:
========
Client identifiers are used to provide a centrally managed database of
the client versions, allowing clients to be banned if they are found to
be behaving undesirably. The client ID is associated with a version
number on the server, however these are only incremented if a client is
banned and do not have to reflect the version of the actual client application.
During development, clients which have not been allocated an identifier should
use the identifier tst, with a version number of 1.0. Do not distribute code or
client implementations which use this test identifier. Do not use the identifiers
used by other clients.
=========
To obtain a new client identifier please contact:
* Last.fm: submissions@last.fm
* # TODO: list others
...and provide us with the name of your client and its homepage address.
"""
_deprecation_warning("Use _Network.scrobble(...), _Network.scrobble_many(...), and Netowrk.update_now_playing(...) instead")
return Scrobbler(self, client_id, client_version)
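# Illustrative sketch (not part of the original pylast source): as the docstring above
# notes, clients without an allocated identifier should use "tst" with version "1.0"
# during development; get_scrobbler() itself is deprecated in favour of scrobble(),
# scrobble_many() and update_now_playing(). 'network' below is a placeholder for an
# authenticated network object.
#
# scrobbler = network.get_scrobbler("tst", "1.0")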
def _get_language_domain(self, domain_language):
"""
Returns the mapped domain name of the network to a DOMAIN_* value
"""
if domain_language in self.domain_names:
return self.domain_names[domain_language]
def _get_url(self, domain, type):
return "http://%s/%s" %(self._get_language_domain(domain), self.urls[type])
def _get_ws_auth(self):
"""
Returns a (API_KEY, API_SECRET, SESSION_KEY) tuple.
"""
return (self.api_key, self.api_secret, self.session_key)
def _delay_call(self):
"""
Makes sure that web service calls are at least a second apart
"""
# delay time in seconds
DELAY_TIME = 1.0
now = time.time()
if (now - self.last_call_time) < DELAY_TIME:
time.sleep(1)
self.last_call_time = now
def create_new_playlist(self, title, description):
"""
Creates a playlist for the authenticated user and returns it
title: The title of the new playlist.
description: The description of the new playlist.
"""
params = {}
params['title'] = title
params['description'] = description
doc = _Request(self, 'playlist.create', params).execute(False)
e_id = doc.getElementsByTagName("id")[0].firstChild.data
user = doc.getElementsByTagName('playlists')[0].getAttribute('user')
return Playlist(user, e_id, self)
def get_top_tags(self, limit=None):
"""Returns a sequence of the most used tags as a sequence of TopItem objects."""
doc = _Request(self, "tag.getTopTags").execute(True)
seq = []
for node in doc.getElementsByTagName("tag"):
tag = Tag(_extract(node, "name"), self)
weight = _number(_extract(node, "count"))
seq.append(TopItem(tag, weight))
if limit:
seq = seq[:limit]
return seq
def enable_proxy(self, host, port):
"""Enable a default web proxy"""
self.proxy = [host, _number(port)]
self.proxy_enabled = True
def disable_proxy(self):
"""Disable using the web proxy"""
self.proxy_enabled = False
def is_proxy_enabled(self):
"""Returns True if a web proxy is enabled."""
return self.proxy_enabled
def _get_proxy(self):
"""Returns proxy details."""
return self.proxy
def enable_caching(self, file_path = None):
"""Enables caching request-wide for all cachable calls.
* file_path: A file path for the backend storage file. If
None is given, a temporary file will be created.
"""
if not file_path:
file_path = tempfile.mktemp(prefix="pylast_tmp_")
self.cache_backend = _ShelfCacheBackend(file_path)
def disable_caching(self):
"""Disables all caching features."""
self.cache_backend = None
def is_caching_enabled(self):
"""Returns True if caching is enabled."""
return not (self.cache_backend == None)
def _get_cache_backend(self):
return self.cache_backend
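# Illustrative sketch (not part of the original pylast source): enabling the optional
# shelve-based cache and a web proxy on a network object. The cache path and proxy
# host/port below are placeholders; calling enable_caching() with no argument lets
# pylast create a temporary file itself.
#
# network.enable_caching() # backed by a tempfile.mktemp() file
# network.enable_caching("/tmp/pylast.cache") # or an explicit path
# network.enable_proxy("proxy.example.org", 3128)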
def search_for_album(self, album_name):
"""Searches for an album by its name. Returns a AlbumSearch object.
Use get_next_page() to retreive sequences of results."""
return AlbumSearch(album_name, self)
def search_for_artist(self, artist_name):
"""Searches of an artist by its name. Returns a ArtistSearch object.
Use get_next_page() to retreive sequences of results."""
return ArtistSearch(artist_name, self)
def search_for_tag(self, tag_name):
"""Searches of a tag by its name. Returns a TagSearch object.
Use get_next_page() to retreive sequences of results."""
return TagSearch(tag_name, self)
def search_for_track(self, artist_name, track_name):
"""Searches of a track by its name and its artist. Set artist to an empty string if not available.
Returns a TrackSearch object.
Use get_next_page() to retreive sequences of results."""
return TrackSearch(artist_name, track_name, self)
def search_for_venue(self, venue_name, country_name):
"""Searches of a venue by its name and its country. Set country_name to an empty string if not available.
Returns a VenueSearch object.
Use get_next_page() to retreive sequences of results."""
return VenueSearch(venue_name, country_name, self)
def get_track_by_mbid(self, mbid):
"""Looks up a track by its MusicBrainz ID"""
params = {"mbid": mbid}
doc = _Request(self, "track.getInfo", params).execute(True)
return Track(_extract(doc, "name", 1), _extract(doc, "name"), self)
def get_artist_by_mbid(self, mbid):
"""Loooks up an artist by its MusicBrainz ID"""
params = {"mbid": mbid}
doc = _Request(self, "artist.getInfo", params).execute(True)
return Artist(_extract(doc, "name"), self)
def get_album_by_mbid(self, mbid):
"""Looks up an album by its MusicBrainz ID"""
params = {"mbid": mbid}
doc = _Request(self, "album.getInfo", params).execute(True)
return Album(_extract(doc, "artist"), _extract(doc, "name"), self)
def update_now_playing(self, artist, title, album = None, album_artist = None,
duration = None, track_number = None, mbid = None, context = None):
"""
Used to notify Last.fm that a user has started listening to a track.
Parameters:
artist (Required) : The artist name
title (Required) : The track title
album (Optional) : The album name.
album_artist (Optional) : The album artist - if this differs from the track artist.
duration (Optional) : The length of the track in seconds.
track_number (Optional) : The track number of the track on the album.
mbid (Optional) : The MusicBrainz Track ID.
context (Optional) : Sub-client version (not public, only enabled for certain API keys)
"""
params = {"track": title, "artist": artist}
if album: params["album"] = album
if album_artist: params["albumArtist"] = album_artist
if context: params["context"] = context
if track_number: params["trackNumber"] = track_number
if mbid: params["mbid"] = mbid
if duration: params["duration"] = duration
_Request(self, "track.updateNowPlaying", params).execute()
def scrobble(self, artist, title, timestamp, album = None, album_artist = None, track_number = None,
duration = None, stream_id = None, context = None, mbid = None):
"""Used to add a track-play to a user's profile.
Parameters:
artist (Required) : The artist name.
title (Required) : The track name.
timestamp (Required) : The time the track started playing, in UNIX timestamp format (integer number of seconds since 00:00:00, January 1st 1970 UTC). This must be in the UTC time zone.
album (Optional) : The album name.
album_artist (Optional) : The album artist - if this differs from the track artist.
context (Optional) : Sub-client version (not public, only enabled for certain API keys)
stream_id (Optional) : The stream id for this track received from the radio.getPlaylist service.
track_number (Optional) : The track number of the track on the album.
mbid (Optional) : The MusicBrainz Track ID.
duration (Optional) : The length of the track in seconds.
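Example (a minimal sketch; assumes `network` is an authenticated network object
and uses the current time as the start-of-play timestamp):
    import time
    network.scrobble(artist="Some Artist", title="Some Track",
                     timestamp=int(time.time()))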
"""
return self.scrobble_many(({"artist": artist, "title": title, "timestamp": timestamp, "album": album, "album_artist": album_artist,
"track_number": track_number, "duration": duration, "stream_id": stream_id, "context": context, "mbid": mbid},))
def scrobble_many(self, tracks):
"""
Used to scrobble a batch of tracks at once. The parameter tracks is a sequence of dicts,
one per track, each containing the keyword arguments that would be passed to the scrobble() method.
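Example (a sketch with placeholder data; each dict takes the same keyword names
as scrobble()):
    network.scrobble_many([
        {"artist": "Artist A", "title": "Track 1", "timestamp": 1234567890},
        {"artist": "Artist B", "title": "Track 2", "timestamp": 1234568190},
    ])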
"""
tracks_to_scrobble = tracks[:50]
if len(tracks) > 50:
remaining_tracks = tracks[50:]
else:
remaining_tracks = None
params = {}
for i in range(len(tracks_to_scrobble)):
params["artist[%d]" % i] = tracks_to_scrobble[i]["artist"]
params["track[%d]" % i] = tracks_to_scrobble[i]["title"]
additional_args = ("timestamp", "album", "album_artist", "context", "stream_id", "track_number", "mbid", "duration")
args_map_to = {"album_artist": "albumArtist", "track_number": "trackNumber", "stream_id": "streamID"} # so friggin lazy
for arg in additional_args:
if arg in tracks_to_scrobble[i] and tracks_to_scrobble[i][arg]:
if arg in args_map_to:
maps_to = args_map_to[arg]
else:
maps_to = arg
params["%s[%d]" %(maps_to, i)] = tracks_to_scrobble[i][arg]
_Request(self, "track.scrobble", params).execute()
if remaining_tracks:
self.scrobble_many(remaining_tracks)
class LastFMNetwork(_Network):
"""A Last.fm network object
api_key: a provided API_KEY
api_secret: a provided API_SECRET
session_key: a generated session_key or None
username: a username of a valid user
password_hash: the output of pylast.md5(password) where password is the user's password
If username and password_hash were provided and not session_key, session_key will be
generated automatically when needed.
Either a valid session_key or a combination of username and password_hash must be present for scrobbling.
Most read-only webservices only require an api_key and an api_secret; see how to obtain them at:
http://www.last.fm/api/account
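Example (a minimal sketch; the key, secret and credentials below are placeholders
for your own values):
    password_hash = pylast.md5("my_password")
    network = pylast.LastFMNetwork(api_key="YOUR_API_KEY", api_secret="YOUR_API_SECRET",
                                   username="my_username", password_hash=password_hash)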
"""
def __init__(self, api_key="", api_secret="", session_key="", username="", password_hash=""):
_Network.__init__(self,
name = "Last.fm",
homepage = "http://last.fm",
ws_server = ("ws.audioscrobbler.com", "/2.0/"),
api_key = api_key if len(api_key)>0 else "af9375b915eab329f530fd23b90a6114",
api_secret = api_secret if len(api_secret)>0 else "db5a3127c0226967f2659763f0e877cc",
session_key = session_key,
submission_server = "http://post.audioscrobbler.com:80/",
username = username,
password_hash = password_hash,
domain_names = {
DOMAIN_ENGLISH: 'www.last.fm',
DOMAIN_GERMAN: 'www.lastfm.de',
DOMAIN_SPANISH: 'www.lastfm.es',
DOMAIN_FRENCH: 'www.lastfm.fr',
DOMAIN_ITALIAN: 'www.lastfm.it',
DOMAIN_POLISH: 'www.lastfm.pl',
DOMAIN_PORTUGUESE: 'www.lastfm.com.br',
DOMAIN_SWEDISH: 'www.lastfm.se',
DOMAIN_TURKISH: 'www.lastfm.com.tr',
DOMAIN_RUSSIAN: 'www.lastfm.ru',
DOMAIN_JAPANESE: 'www.lastfm.jp',
DOMAIN_CHINESE: 'cn.last.fm',
},
urls = {
"album": "music/%(artist)s/%(album)s",
"artist": "music/%(artist)s",
"event": "event/%(id)s",
"country": "place/%(country_name)s",
"playlist": "user/%(user)s/library/playlists/%(appendix)s",
"tag": "tag/%(name)s",
"track": "music/%(artist)s/_/%(title)s",
"group": "group/%(name)s",
"user": "user/%(name)s",
}
)
def __repr__(self):
return "pylast.LastFMNetwork(%s)" %(", ".join(("'%s'" %self.api_key, "'%s'" %self.api_secret, "'%s'" %self.session_key,
"'%s'" %self.username, "'%s'" %self.password_hash)))
def __str__(self):
return "LastFM Network"
def get_lastfm_network(api_key="", api_secret="", session_key = "", username = "", password_hash = ""):
"""
Returns a preconfigured _Network object for Last.fm
api_key: a provided API_KEY
api_secret: a provided API_SECRET
session_key: a generated session_key or None
username: a username of a valid user
password_hash: the output of pylast.md5(password) where password is the user's password
If username and password_hash were provided and not session_key, session_key will be
generated automatically when needed.
Either a valid session_key or a combination of username and password_hash must be present for scrobbling.
Most read-only webservices only require an api_key and an api_secret; see how to obtain them at:
http://www.last.fm/api/account
"""
_deprecation_warning("Create a LastFMNetwork object instead")
return LastFMNetwork(api_key, api_secret, session_key, username, password_hash)
class LibreFMNetwork(_Network):
"""
A preconfigured _Network object for Libre.fm
api_key: a provided API_KEY
api_secret: a provided API_SECRET
session_key: a generated session_key or None
username: a username of a valid user
password_hash: the output of pylast.md5(password) where password is the user's password
If username and password_hash were provided and not session_key, session_key will be
generated automatically when needed.
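Example (a minimal sketch; replace the placeholders with your own Libre.fm credentials):
    network = pylast.LibreFMNetwork(api_key="YOUR_API_KEY", api_secret="YOUR_API_SECRET",
                                    username="my_username",
                                    password_hash=pylast.md5("my_password"))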
"""
def __init__(self, api_key="", api_secret="", session_key = "", username = "", password_hash = ""):
_Network.__init__(self,
name = "Libre.fm",
homepage = "http://alpha.dev.libre.fm",
ws_server = ("alpha.dev.libre.fm", "/2.0/"),
api_key = api_key,
api_secret = api_secret,
session_key = session_key,
submission_server = "http://turtle.libre.fm:80/",
username = username,
password_hash = password_hash,
domain_names = {
DOMAIN_ENGLISH: "alpha.dev.libre.fm",
DOMAIN_GERMAN: "alpha.dev.libre.fm",
DOMAIN_SPANISH: "alpha.dev.libre.fm",
DOMAIN_FRENCH: "alpha.dev.libre.fm",
DOMAIN_ITALIAN: "alpha.dev.libre.fm",
DOMAIN_POLISH: "alpha.dev.libre.fm",
DOMAIN_PORTUGUESE: "alpha.dev.libre.fm",
DOMAIN_SWEDISH: "alpha.dev.libre.fm",
DOMAIN_TURKISH: "alpha.dev.libre.fm",
DOMAIN_RUSSIAN: "alpha.dev.libre.fm",
DOMAIN_JAPANESE: "alpha.dev.libre.fm",
DOMAIN_CHINESE: "alpha.dev.libre.fm",
},
urls = {
"album": "artist/%(artist)s/album/%(album)s",
"artist": "artist/%(artist)s",
"event": "event/%(id)s",
"country": "place/%(country_name)s",
"playlist": "user/%(user)s/library/playlists/%(appendix)s",
"tag": "tag/%(name)s",
"track": "music/%(artist)s/_/%(title)s",
"group": "group/%(name)s",
"user": "user/%(name)s",
}
)
def __repr__(self):
return "pylast.LibreFMNetwork(%s)" %(", ".join(("'%s'" %self.api_key, "'%s'" %self.api_secret, "'%s'" %self.session_key,
"'%s'" %self.username, "'%s'" %self.password_hash)))
def __str__(self):
return "Libre.fm Network"
def get_librefm_network(api_key="", api_secret="", session_key = "", username = "", password_hash = ""):
"""
Returns a preconfigured _Network object for Libre.fm
api_key: a provided API_KEY
api_secret: a provided API_SECRET
session_key: a generated session_key or None
username: a username of a valid user
password_hash: the output of pylast.md5(password) where password is the user's password
If username and password_hash were provided and not session_key, session_key will be
generated automatically when needed.
"""
_deprecation_warning("DeprecationWarning: Create a LibreFMNetwork object instead")
return LibreFMNetwork(api_key, api_secret, session_key, username, password_hash)
class _ShelfCacheBackend(object):
"""Used as a backend for caching cacheable requests."""
def __init__(self, file_path = None):
self.shelf = shelve.open(file_path)
def get_xml(self, key):
return self.shelf[key]
def set_xml(self, key, xml_string):
self.shelf[key] = xml_string
def has_key(self, key):
return key in self.shelf
class _Request(object):
"""Representing an abstract web service operation."""
def __init__(self, network, method_name, params = None):
self.network = network
self.params = {}
for key in (params or {}):
self.params[key] = _unicode(params[key])
(self.api_key, self.api_secret, self.session_key) = network._get_ws_auth()
self.params["api_key"] = self.api_key
self.params["method"] = method_name
if network.is_caching_enabled():
self.cache = network._get_cache_backend()
if self.session_key:
self.params["sk"] = self.session_key
self.sign_it()
def sign_it(self):
"""Sign this request."""
if not "api_sig" in self.params.keys():
self.params['api_sig'] = self._get_signature()
def _get_signature(self):
"""Returns a 32-character hexadecimal md5 hash of the signature string."""
keys = list(self.params.keys())
keys.sort()
string = ""
for name in keys:
string += name
string += self.params[name]
string += self.api_secret
return md5(string)
def _get_cache_key(self):
"""The cache key is a string of concatenated sorted names and values."""
keys = list(self.params.keys())
keys.sort()
cache_key = str()
for key in keys:
if key != "api_sig" and key != "api_key" and key != "sk":
cache_key += key + _string(self.params[key])
return hashlib.sha1(cache_key).hexdigest()
def _get_cached_response(self):
"""Returns a file object of the cached response."""
if not self._is_cached():
response = self._download_response()
self.cache.set_xml(self._get_cache_key(), response)
return self.cache.get_xml(self._get_cache_key())
def _is_cached(self):
"""Returns True if the request is already in cache."""
return self.cache.has_key(self._get_cache_key())
def _download_response(self):
"""Returns a response body string from the server."""
# Delay the call if necessary
#self.network._delay_call() # enable it if you want.
data = []
for name in self.params.keys():
data.append('='.join((name, url_quote_plus(_string(self.params[name])))))
data = '&'.join(data)
headers = {
"Content-type": "application/x-www-form-urlencoded",
'Accept-Charset': 'utf-8',
'User-Agent': "pylast" + '/' + __version__
}
(HOST_NAME, HOST_SUBDIR) = self.network.ws_server
if self.network.is_proxy_enabled():
conn = HTTPConnection(host = self._get_proxy()[0], port = self._get_proxy()[1])
try:
conn.request(method='POST', url="http://" + HOST_NAME + HOST_SUBDIR,
body=data, headers=headers)
except Exception as e:
raise NetworkError(self.network, e)
else:
conn = HTTPConnection(host=HOST_NAME)
try:
conn.request(method='POST', url=HOST_SUBDIR, body=data, headers=headers)
except Exception as e:
raise NetworkError(self.network, e)
try:
response_text = _unicode(conn.getresponse().read())
except Exception as e:
raise MalformedResponseError(self.network, e)
self._check_response_for_errors(response_text)
return response_text
def execute(self, cacheable = False):
"""Returns the XML DOM response of the POST Request from the server"""
if self.network.is_caching_enabled() and cacheable:
response = self._get_cached_response()
else:
response = self._download_response()
return minidom.parseString(_string(response))
def _check_response_for_errors(self, response):
"""Checks the response for errors and raises one if any exists."""
try:
doc = minidom.parseString(_string(response))
except Exception as e:
raise MalformedResponseError(self.network, e)
e = doc.getElementsByTagName('lfm')[0]
if e.getAttribute('status') != "ok":
e = doc.getElementsByTagName('error')[0]
status = e.getAttribute('code')
details = e.firstChild.data.strip()
raise WSError(self.network, status, details)
class SessionKeyGenerator(object):
"""Methods of generating a session key:
1) Web Authentication:
a. network = get_*_network(API_KEY, API_SECRET)
b. sg = SessionKeyGenerator(network)
c. url = sg.get_web_auth_url()
d. Ask the user to open the url and authorize you, and wait for it.
e. session_key = sg.get_web_auth_session_key(url)
2) Username and Password Authentication:
a. network = get_*_network(API_KEY, API_SECRET)
b. username = raw_input("Please enter your username: ")
c. password_hash = pylast.md5(raw_input("Please enter your password: "))
d. session_key = SessionKeyGenerator(network).get_session_key(username, password_hash)
A session key's lifetime is infinite, unless the user revokes the rights of the given API Key.
If you create a Network object with just an API_KEY and API_SECRET and a username and a password_hash, a
SESSION_KEY will be automatically generated for that network and stored in it, so you don't have to do this
manually unless you want to.
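Example of the web authentication flow as code (a sketch; assumes `network` was
created with a valid API_KEY and API_SECRET and that the user authorizes the
printed url in a browser):
    sg = SessionKeyGenerator(network)
    url = sg.get_web_auth_url()
    print("Please authorize this application: %s" % url)
    # ...wait until the user has authorized, then:
    session_key = sg.get_web_auth_session_key(url)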
"""
def __init__(self, network):
self.network = network
self.web_auth_tokens = {}
def _get_web_auth_token(self):
"""Retrieves a token from the network for web authentication.
The token then has to be authorized from getAuthURL before creating session.
"""
request = _Request(self.network, 'auth.getToken')
# default action is that a request is signed only when
# a session key is provided.
request.sign_it()
doc = request.execute()
e = doc.getElementsByTagName('token')[0]
return e.firstChild.data
def get_web_auth_url(self):
"""The user must open this page, and you first, then call get_web_auth_session_key(url) after that."""
token = self._get_web_auth_token()
url = '%(homepage)s/api/auth/?api_key=%(api)s&token=%(token)s' % \
{"homepage": self.network.homepage, "api": self.network.api_key, "token": token}
self.web_auth_tokens[url] = token
return url
def get_web_auth_session_key(self, url):
"""Retrieves the session key of a web authorization process by its url."""
if url in self.web_auth_tokens.keys():
token = self.web_auth_tokens[url]
else:
token = "" #that's gonna raise a WSError of an unauthorized token when the request is executed.
request = _Request(self.network, 'auth.getSession', {'token': token})
# default action is that a request is signed only when
# a session key is provided.
request.sign_it()
doc = request.execute()
return doc.getElementsByTagName('key')[0].firstChild.data
def get_session_key(self, username, password_hash):
"""Retrieve a session key with a username and a md5 hash of the user's password."""
params = {"username": username, "authToken": md5(username + password_hash)}
request = _Request(self.network, "auth.getMobileSession", params)
# default action is that a request is signed only when
# a session key is provided.
request.sign_it()
doc = request.execute()
return _extract(doc, "key")
TopItem = collections.namedtuple("TopItem", ["item", "weight"])
SimilarItem = collections.namedtuple("SimilarItem", ["item", "match"])
LibraryItem = collections.namedtuple("LibraryItem", ["item", "playcount", "tagcount"])
PlayedTrack = collections.namedtuple("PlayedTrack", ["track", "playback_date", "timestamp"])
LovedTrack = collections.namedtuple("LovedTrack", ["track", "date", "timestamp"])
ImageSizes = collections.namedtuple("ImageSizes", ["original", "large", "largesquare", "medium", "small", "extralarge"])
Image = collections.namedtuple("Image", ["title", "url", "dateadded", "format", "owner", "sizes", "votes"])
Shout = collections.namedtuple("Shout", ["body", "author", "date"])
def _string_output(funct):
def r(*args):
return _string(funct(*args))
return r
def _pad_list(given_list, desired_length, padding = None):
"""
Pads a list to be of the desired_length.
"""
while len(given_list) < desired_length:
given_list.append(padding)
return given_list
class _BaseObject(object):
"""An abstract webservices object."""
network = None
def __init__(self, network):
self.network = network
def _request(self, method_name, cacheable = False, params = None):
if not params:
params = self._get_params()
return _Request(self.network, method_name, params).execute(cacheable)
def _get_params(self):
"""Returns the most common set of parameters between all objects."""
return {}
def __hash__(self):
return hash(self.network) + \
hash(str(type(self)) + "".join(list(self._get_params().keys()) + list(self._get_params().values())).lower())
class _Taggable(object):
"""Common functions for classes with tags."""
def __init__(self, ws_prefix):
self.ws_prefix = ws_prefix
def add_tags(self, tags):
"""Adds one or several tags.
* tags: A sequence of tag names or Tag objects.
"""
for tag in tags:
self.add_tag(tag)
def add_tag(self, tag):
"""Adds one tag.
* tag: a tag name or a Tag object.
"""
if isinstance(tag, Tag):
tag = tag.get_name()
params = self._get_params()
params['tags'] = tag
self._request(self.ws_prefix + '.addTags', False, params)
def remove_tag(self, tag):
"""Remove a user's tag from this object."""
if isinstance(tag, Tag):
tag = tag.get_name()
params = self._get_params()
params['tag'] = tag
self._request(self.ws_prefix + '.removeTag', False, params)
def get_tags(self):
"""Returns a list of the tags set by the user to this object."""
# Uncacheable because it can be dynamically changed by the user.
params = self._get_params()
doc = self._request(self.ws_prefix + '.getTags', False, params)
tag_names = _extract_all(doc, 'name')
tags = []
for tag in tag_names:
tags.append(Tag(tag, self.network))
return tags
def remove_tags(self, tags):
"""Removes one or several tags from this object.
* tags: a sequence of tag names or Tag objects.
"""
for tag in tags:
self.remove_tag(tag)
def clear_tags(self):
"""Clears all the user-set tags. """
self.remove_tags(self.get_tags())
def set_tags(self, tags):
"""Sets this object's tags to only those tags.
* tags: a sequence of tag names or Tag objects.
"""
c_old_tags = []
old_tags = []
c_new_tags = []
new_tags = []
to_remove = []
to_add = []
tags_on_server = self.get_tags()
for tag in tags_on_server:
c_old_tags.append(tag.get_name().lower())
old_tags.append(tag.get_name())
for tag in tags:
if isinstance(tag, Tag):
tag = tag.get_name()
c_new_tags.append(tag.lower())
new_tags.append(tag)
for i in range(0, len(old_tags)):
if not c_old_tags[i] in c_new_tags:
to_remove.append(old_tags[i])
for i in range(0, len(new_tags)):
if not c_new_tags[i] in c_old_tags:
to_add.append(new_tags[i])
self.remove_tags(to_remove)
self.add_tags(to_add)
def get_top_tags(self, limit=None):
"""Returns a list of the most frequently used Tags on this object."""
doc = self._request(self.ws_prefix + '.getTopTags', True)
elements = doc.getElementsByTagName('tag')
seq = []
for element in elements:
tag_name = _extract(element, 'name')
tagcount = _extract(element, 'count')
seq.append(TopItem(Tag(tag_name, self.network), tagcount))
if limit:
seq = seq[:limit]
return seq
class WSError(Exception):
"""Exception related to the Network web service"""
def __init__(self, network, status, details):
self.status = status
self.details = details
self.network = network
@_string_output
def __str__(self):
return self.details
def get_id(self):
"""Returns the exception ID, from one of the following:
STATUS_INVALID_SERVICE = 2
STATUS_INVALID_METHOD = 3
STATUS_AUTH_FAILED = 4
STATUS_INVALID_FORMAT = 5
STATUS_INVALID_PARAMS = 6
STATUS_INVALID_RESOURCE = 7
STATUS_TOKEN_ERROR = 8
STATUS_INVALID_SK = 9
STATUS_INVALID_API_KEY = 10
STATUS_OFFLINE = 11
STATUS_SUBSCRIBERS_ONLY = 12
STATUS_TOKEN_UNAUTHORIZED = 14
STATUS_TOKEN_EXPIRED = 15
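Example (a sketch; assumes the STATUS_* module constants listed above and an
authenticated Track object named `track`):
    try:
        track.love()
    except WSError as e:
        if e.get_id() == STATUS_INVALID_SK:
            pass  # the session key is invalid; re-authenticate before retrying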
"""
return self.status
class MalformedResponseError(Exception):
"""Exception conveying a malformed response from Last.fm."""
def __init__(self, network, underlying_error):
self.network = network
self.underlying_error = underlying_error
def __str__(self):
return "Malformed response from Last.fm. Underlying error: %s" %str(self.underlying_error)
class NetworkError(Exception):
"""Exception conveying a problem in sending a request to Last.fm"""
def __init__(self, network, underlying_error):
self.network = network
self.underlying_error = underlying_error
def __str__(self):
return "NetworkError: %s" %str(self.underlying_error)
class Album(_BaseObject, _Taggable):
"""An album."""
title = None
artist = None
def __init__(self, artist, title, network):
"""
Create an album instance.
# Parameters:
* artist: An artist name or an Artist object.
* title: The album title.
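Example (a sketch; `network` may be any pylast network object):
    album = Album("Some Artist", "Some Album", network)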
"""
_BaseObject.__init__(self, network)
_Taggable.__init__(self, 'album')
if isinstance(artist, Artist):
self.artist = artist
else:
self.artist = Artist(artist, self.network)
self.title = title
def __repr__(self):
return "pylast.Album(%s, %s, %s)" %(repr(self.artist.name), repr(self.title), repr(self.network))
@_string_output
def __str__(self):
return _unicode("%s - %s") %(self.get_artist().get_name(), self.get_title())
def __eq__(self, other):
return (self.get_title().lower() == other.get_title().lower()) and (self.get_artist().get_name().lower() == other.get_artist().get_name().lower())
def __ne__(self, other):
return (self.get_title().lower() != other.get_title().lower()) or (self.get_artist().get_name().lower() != other.get_artist().get_name().lower())
def _get_params(self):
return {'artist': self.get_artist().get_name(), 'album': self.get_title(), }
def get_artist(self):
"""Returns the associated Artist object."""
return self.artist
def get_title(self):
"""Returns the album title."""
return self.title
def get_name(self):
"""Returns the album title (alias to Album.get_title)."""
return self.get_title()
def get_release_date(self):
"""Retruns the release date of the album."""
return _extract(self._request("album.getInfo", cacheable = True), "releasedate")
def get_cover_image(self, size = COVER_EXTRA_LARGE):
"""
Returns a uri to the cover image
size can be one of:
COVER_EXTRA_LARGE
COVER_LARGE
COVER_MEDIUM
COVER_SMALL
"""
return _extract_all(self._request("album.getInfo", cacheable = True), 'image')[size]
def get_id(self):
"""Returns the ID"""
return _extract(self._request("album.getInfo", cacheable = True), "id")
def get_playcount(self):
"""Returns the number of plays on the network"""
return _number(_extract(self._request("album.getInfo", cacheable = True), "playcount"))
def get_listener_count(self):
"""Returns the number of liteners on the network"""
return _number(_extract(self._request("album.getInfo", cacheable = True), "listeners"))
def get_top_tags(self, limit=None):
"""Returns a list of the most-applied tags to this album."""
doc = self._request("album.getInfo", True)
e = doc.getElementsByTagName("toptags")[0]
seq = []
for name in _extract_all(e, "name"):
seq.append(Tag(name, self.network))
if limit:
seq = seq[:limit]
return seq
def get_tracks(self):
"""Returns the list of Tracks on this album."""
uri = 'lastfm://playlist/album/%s' %self.get_id()
return XSPF(uri, self.network).get_tracks()
def get_mbid(self):
"""Returns the MusicBrainz id of the album."""
return _extract(self._request("album.getInfo", cacheable = True), "mbid")
def get_url(self, domain_name = DOMAIN_ENGLISH):
"""Returns the url of the album page on the network.
# Parameters:
* domain_name str: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
artist = _url_safe(self.get_artist().get_name())
album = _url_safe(self.get_title())
return self.network._get_url(domain_name, "album") %{'artist': artist, 'album': album}
def get_wiki_published_date(self):
"""Returns the date of publishing this version of the wiki."""
doc = self._request("album.getInfo", True)
if len(doc.getElementsByTagName("wiki")) == 0:
return
node = doc.getElementsByTagName("wiki")[0]
return _extract(node, "published")
def get_wiki_summary(self):
"""Returns the summary of the wiki."""
doc = self._request("album.getInfo", True)
if len(doc.getElementsByTagName("wiki")) == 0:
return
node = doc.getElementsByTagName("wiki")[0]
return _extract(node, "summary")
def get_wiki_content(self):
"""Returns the content of the wiki."""
doc = self._request("album.getInfo", True)
if len(doc.getElementsByTagName("wiki")) == 0:
return
node = doc.getElementsByTagName("wiki")[0]
return _extract(node, "content")
class Artist(_BaseObject, _Taggable):
"""An artist."""
name = None
def __init__(self, name, network):
"""Create an artist object.
# Parameters:
* name str: The artist's name.
"""
_BaseObject.__init__(self, network)
_Taggable.__init__(self, 'artist')
self.name = name
def __repr__(self):
return "pylast.Artist(%s, %s)" %(repr(self.get_name()), repr(self.network))
@_string_output
def __str__(self):
return self.get_name()
def __eq__(self, other):
return self.get_name().lower() == other.get_name().lower()
def __ne__(self, other):
return self.get_name().lower() != other.get_name().lower()
def _get_params(self):
return {'artist': self.get_name()}
def get_name(self, properly_capitalized=False):
"""Returns the name of the artist.
If properly_capitalized is True, the name will be retrieved from the network,
overwriting the locally stored one."""
if properly_capitalized:
self.name = _extract(self._request("artist.getInfo", True), "name")
return self.name
def get_cover_image(self, size = COVER_MEGA):
"""
Returns a uri to the cover image
size can be one of:
COVER_MEGA
COVER_EXTRA_LARGE
COVER_LARGE
COVER_MEDIUM
COVER_SMALL
"""
return _extract_all(self._request("artist.getInfo", True), "image")[size]
def get_playcount(self):
"""Returns the number of plays on the network."""
return _number(_extract(self._request("artist.getInfo", True), "playcount"))
def get_mbid(self):
"""Returns the MusicBrainz ID of this artist."""
doc = self._request("artist.getInfo", True)
return _extract(doc, "mbid")
def get_listener_count(self):
"""Returns the number of liteners on the network."""
if hasattr(self, "listener_count"):
return self.listener_count
else:
self.listener_count = _number(_extract(self._request("artist.getInfo", True), "listeners"))
return self.listener_count
def is_streamable(self):
"""Returns True if the artist is streamable."""
return bool(_number(_extract(self._request("artist.getInfo", True), "streamable")))
def get_bio_published_date(self):
"""Returns the date on which the artist's biography was published."""
return _extract(self._request("artist.getInfo", True), "published")
def get_bio_summary(self, language=None):
"""Returns the summary of the artist's biography."""
if language:
params = self._get_params()
params["lang"] = language
else:
params = None
return _extract(self._request("artist.getInfo", True, params), "summary")
def get_bio_content(self, language=None):
"""Returns the content of the artist's biography."""
if language:
params = self._get_params()
params["lang"] = language
else:
params = None
return _extract(self._request("artist.getInfo", True, params), "content")
def get_upcoming_events(self):
"""Returns a list of the upcoming Events for this artist."""
doc = self._request('artist.getEvents', True)
ids = _extract_all(doc, 'id')
events = []
for e_id in ids:
events.append(Event(e_id, self.network))
return events
def get_similar(self, limit = None):
"""Returns the similar artists on the network."""
params = self._get_params()
if limit:
params['limit'] = limit
doc = self._request('artist.getSimilar', True, params)
names = _extract_all(doc, "name")
matches = _extract_all(doc, "match")
artists = []
for i in range(0, len(names)):
artists.append(SimilarItem(Artist(names[i], self.network), _number(matches[i])))
return artists
def get_top_albums(self):
"""Retuns a list of the top albums."""
doc = self._request('artist.getTopAlbums', True)
seq = []
for node in doc.getElementsByTagName("album"):
name = _extract(node, "name")
artist = _extract(node, "name", 1)
playcount = _extract(node, "playcount")
seq.append(TopItem(Album(artist, name, self.network), playcount))
return seq
def get_top_tracks(self):
"""Returns a list of the most played Tracks by this artist."""
doc = self._request("artist.getTopTracks", True)
seq = []
for track in doc.getElementsByTagName('track'):
title = _extract(track, "name")
artist = _extract(track, "name", 1)
playcount = _number(_extract(track, "playcount"))
seq.append( TopItem(Track(artist, title, self.network), playcount) )
return seq
def get_top_fans(self, limit = None):
"""Returns a list of the Users who played this artist the most.
# Parameters:
* limit int: Max elements.
"""
doc = self._request('artist.getTopFans', True)
seq = []
elements = doc.getElementsByTagName('user')
for element in elements:
if limit and len(seq) >= limit:
break
name = _extract(element, 'name')
weight = _number(_extract(element, 'weight'))
seq.append(TopItem(User(name, self.network), weight))
return seq
def share(self, users, message = None):
"""Shares this artist (sends out recommendations).
# Parameters:
* users [User|str,]: A list that can contain usernames, emails, User objects, or all of them.
* message str: A message to include in the recommendation message.
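Example (a sketch; assumes `artist` is an Artist object and the recipients below
are placeholders):
    artist.share(["some_friend", "another_friend"], "Worth a listen!")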
"""
#last.fm currently accepts a max of 10 recipients at a time
while(len(users) > 10):
section = users[0:9]
users = users[9:]
self.share(section, message)
nusers = []
for user in users:
if isinstance(user, User):
nusers.append(user.get_name())
else:
nusers.append(user)
params = self._get_params()
recipients = ','.join(nusers)
params['recipient'] = recipients
if message:
params['message'] = message
self._request('artist.share', False, params)
def get_url(self, domain_name = DOMAIN_ENGLISH):
"""Returns the url of the artist page on the network.
# Parameters:
* domain_name: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
artist = _url_safe(self.get_name())
return self.network._get_url(domain_name, "artist") %{'artist': artist}
def get_images(self, order=IMAGES_ORDER_POPULARITY, limit=None):
"""
Returns a sequence of Image objects.
order can be IMAGES_ORDER_POPULARITY or IMAGES_ORDER_DATE.
If limit is None, it will try to pull all the available data.
"""
images = []
params = self._get_params()
params["order"] = order
nodes = _collect_nodes(limit, self, "artist.getImages", True, params)
for e in nodes:
if _extract(e, "name"):
user = User(_extract(e, "name"), self.network)
else:
user = None
images.append(Image(
_extract(e, "title"),
_extract(e, "url"),
_extract(e, "dateadded"),
_extract(e, "format"),
user,
ImageSizes(*_extract_all(e, "size")),
(_extract(e, "thumbsup"), _extract(e, "thumbsdown"))
)
)
return images
def get_shouts(self, limit=50):
"""
Returns a sequence of Shout objects
"""
shouts = []
for node in _collect_nodes(limit, self, "artist.getShouts", False):
shouts.append(Shout(
_extract(node, "body"),
User(_extract(node, "author"), self.network),
_extract(node, "date")
)
)
return shouts
def shout(self, message):
"""
Post a shout
"""
params = self._get_params()
params["message"] = message
self._request("artist.Shout", False, params)
class Event(_BaseObject):
"""An event."""
id = None
def __init__(self, event_id, network):
_BaseObject.__init__(self, network)
self.id = event_id
def __repr__(self):
return "pylast.Event(%s, %s)" %(repr(self.id), repr(self.network))
@_string_output
def __str__(self):
return "Event #" + self.get_id()
def __eq__(self, other):
return self.get_id() == other.get_id()
def __ne__(self, other):
return self.get_id() != other.get_id()
def _get_params(self):
return {'event': self.get_id()}
def attend(self, attending_status):
"""Sets the attending status.
* attending_status: The attending status. Possible values:
o EVENT_ATTENDING
o EVENT_MAYBE_ATTENDING
o EVENT_NOT_ATTENDING
"""
params = self._get_params()
params['status'] = attending_status
self._request('event.attend', False, params)
def get_attendees(self):
"""
Get a list of attendees for an event
"""
doc = self._request("event.getAttendees", False)
users = []
for name in _extract_all(doc, "name"):
users.append(User(name, self.network))
return users
def get_id(self):
"""Returns the id of the event on the network. """
return self.id
def get_title(self):
"""Returns the title of the event. """
doc = self._request("event.getInfo", True)
return _extract(doc, "title")
def get_headliner(self):
"""Returns the headliner of the event. """
doc = self._request("event.getInfo", True)
return Artist(_extract(doc, "headliner"), self.network)
def get_artists(self):
"""Returns a list of the participating Artists. """
doc = self._request("event.getInfo", True)
names = _extract_all(doc, "artist")
artists = []
for name in names:
artists.append(Artist(name, self.network))
return artists
def get_venue(self):
"""Returns the venue where the event is held."""
doc = self._request("event.getInfo", True)
v = doc.getElementsByTagName("venue")[0]
venue_id = _number(_extract(v, "id"))
return Venue(venue_id, self.network)
def get_start_date(self):
"""Returns the date when the event starts."""
doc = self._request("event.getInfo", True)
return _extract(doc, "startDate")
def get_description(self):
"""Returns the description of the event. """
doc = self._request("event.getInfo", True)
return _extract(doc, "description")
def get_cover_image(self, size = COVER_MEGA):
"""
Returns a uri to the cover image
size can be one of:
COVER_MEGA
COVER_EXTRA_LARGE
COVER_LARGE
COVER_MEDIUM
COVER_SMALL
"""
doc = self._request("event.getInfo", True)
return _extract_all(doc, "image")[size]
def get_attendance_count(self):
"""Returns the number of attending people. """
doc = self._request("event.getInfo", True)
return _number(_extract(doc, "attendance"))
def get_review_count(self):
"""Returns the number of available reviews for this event. """
doc = self._request("event.getInfo", True)
return _number(_extract(doc, "reviews"))
def get_url(self, domain_name = DOMAIN_ENGLISH):
"""Returns the url of the event page on the network.
* domain_name: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
return self.network._get_url(domain_name, "event") %{'id': self.get_id()}
def share(self, users, message = None):
"""Shares this event (sends out recommendations).
* users: A list that can contain usernames, emails, User objects, or all of them.
* message: A message to include in the recommendation message.
"""
#last.fm currently accepts a max of 10 recipients at a time
while(len(users) > 10):
section = users[0:9]
users = users[9:]
self.share(section, message)
nusers = []
for user in users:
if isinstance(user, User):
nusers.append(user.get_name())
else:
nusers.append(user)
params = self._get_params()
recipients = ','.join(nusers)
params['recipient'] = recipients
if message:
params['message'] = message
self._request('event.share', False, params)
def get_shouts(self, limit=50):
"""
Returns a sequence of Shout objects
"""
shouts = []
for node in _collect_nodes(limit, self, "event.getShouts", False):
shouts.append(Shout(
_extract(node, "body"),
User(_extract(node, "author"), self.network),
_extract(node, "date")
)
)
return shouts
def shout(self, message):
"""
Post a shout
"""
params = self._get_params()
params["message"] = message
self._request("event.Shout", False, params)
class Country(_BaseObject):
"""A country at Last.fm."""
name = None
def __init__(self, name, network):
_BaseObject.__init__(self, network)
self.name = name
def __repr__(self):
return "pylast.Country(%s, %s)" %(repr(self.name), repr(self.network))
@_string_output
def __str__(self):
return self.get_name()
def __eq__(self, other):
return self.get_name().lower() == other.get_name().lower()
def __ne__(self, other):
return self.get_name().lower() != other.get_name().lower()
def _get_params(self):
return {'country': self.get_name()}
def _get_name_from_code(self, alpha2code):
# TODO: Have this function lookup the alpha-2 code and return the country name.
return alpha2code
def get_name(self):
"""Returns the country name. """
return self.name
def get_top_artists(self):
"""Returns a sequence of the most played artists."""
doc = self._request('geo.getTopArtists', True)
seq = []
for node in doc.getElementsByTagName("artist"):
name = _extract(node, 'name')
playcount = _extract(node, "playcount")
seq.append(TopItem(Artist(name, self.network), playcount))
return seq
def get_top_tracks(self):
"""Returns a sequence of the most played tracks"""
doc = self._request("geo.getTopTracks", True)
seq = []
for n in doc.getElementsByTagName('track'):
title = _extract(n, 'name')
artist = _extract(n, 'name', 1)
playcount = _number(_extract(n, "playcount"))
seq.append( TopItem(Track(artist, title, self.network), playcount))
return seq
def get_url(self, domain_name = DOMAIN_ENGLISH):
"""Returns the url of the event page on the network.
* domain_name: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
country_name = _url_safe(self.get_name())
return self.network._get_url(domain_name, "country") %{'country_name': country_name}
class Library(_BaseObject):
"""A user's Last.fm library."""
user = None
def __init__(self, user, network):
_BaseObject.__init__(self, network)
if isinstance(user, User):
self.user = user
else:
self.user = User(user, self.network)
self._albums_index = 0
self._artists_index = 0
self._tracks_index = 0
def __repr__(self):
return "pylast.Library(%s, %s)" %(repr(self.user), repr(self.network))
@_string_output
def __str__(self):
return repr(self.get_user()) + "'s Library"
def _get_params(self):
return {'user': self.user.get_name()}
def get_user(self):
"""Returns the user who owns this library."""
return self.user
def add_album(self, album):
"""Add an album to this library."""
params = self._get_params()
params["artist"] = album.get_artist.get_name()
params["album"] = album.get_name()
self._request("library.addAlbum", False, params)
def add_artist(self, artist):
"""Add an artist to this library."""
params = self._get_params()
params["artist"] = artist.get_name()
self._request("library.addArtist", False, params)
def add_track(self, track):
"""Add a track to this library."""
params = self._get_params()
params["track"] = track.get_title()
self._request("library.addTrack", False, params)
def get_albums(self, artist=None, limit=50):
"""
Returns a sequence of Album objects
If no artist is specified, it will return all albums, sorted by playcount in descending order.
If limit==None it will return all (may take a while)
"""
params = self._get_params()
if artist:
params["artist"] = artist
seq = []
for node in _collect_nodes(limit, self, "library.getAlbums", True, params):
name = _extract(node, "name")
artist = _extract(node, "name", 1)
playcount = _number(_extract(node, "playcount"))
tagcount = _number(_extract(node, "tagcount"))
seq.append(LibraryItem(Album(artist, name, self.network), playcount, tagcount))
return seq
def get_artists(self, limit=50):
"""
Returns a sequence of Artist objects
if limit==None it will return all (may take a while)
"""
seq = []
for node in _collect_nodes(limit, self, "library.getArtists", True):
name = _extract(node, "name")
playcount = _number(_extract(node, "playcount"))
tagcount = _number(_extract(node, "tagcount"))
seq.append(LibraryItem(Artist(name, self.network), playcount, tagcount))
return seq
def get_tracks(self, artist=None, album=None, limit=50):
"""
Returns a sequence of Track objects
If limit==None it will return all (may take a while)
"""
params = self._get_params()
if artist:
params["artist"] = artist
if album:
params["album"] = album
seq = []
for node in _collect_nodes(limit, self, "library.getTracks", True, params):
name = _extract(node, "name")
artist = _extract(node, "name", 1)
playcount = _number(_extract(node, "playcount"))
tagcount = _number(_extract(node, "tagcount"))
seq.append(LibraryItem(Track(artist, name, self.network), playcount, tagcount))
return seq
class Playlist(_BaseObject):
"""A Last.fm user playlist."""
id = None
user = None
def __init__(self, user, id, network):
_BaseObject.__init__(self, network)
if isinstance(user, User):
self.user = user
else:
self.user = User(user, self.network)
self.id = id
@_string_output
def __str__(self):
return repr(self.user) + "'s playlist # " + repr(self.id)
def _get_info_node(self):
"""Returns the node from user.getPlaylists where this playlist's info is."""
doc = self._request("user.getPlaylists", True)
for node in doc.getElementsByTagName("playlist"):
if _extract(node, "id") == str(self.get_id()):
return node
def _get_params(self):
return {'user': self.user.get_name(), 'playlistID': self.get_id()}
def get_id(self):
"""Returns the playlist id."""
return self.id
def get_user(self):
"""Returns the owner user of this playlist."""
return self.user
def get_tracks(self):
"""Returns a list of the tracks on this user playlist."""
uri = _unicode('lastfm://playlist/%s') %self.get_id()
return XSPF(uri, self.network).get_tracks()
def add_track(self, track):
"""Adds a Track to this Playlist."""
params = self._get_params()
params['artist'] = track.get_artist().get_name()
params['track'] = track.get_title()
self._request('playlist.addTrack', False, params)
def get_title(self):
"""Returns the title of this playlist."""
return _extract(self._get_info_node(), "title")
def get_creation_date(self):
"""Returns the creation date of this playlist."""
return _extract(self._get_info_node(), "date")
def get_size(self):
"""Returns the number of tracks in this playlist."""
return _number(_extract(self._get_info_node(), "size"))
def get_description(self):
"""Returns the description of this playlist."""
return _extract(self._get_info_node(), "description")
def get_duration(self):
"""Returns the duration of this playlist in milliseconds."""
return _number(_extract(self._get_info_node(), "duration"))
def is_streamable(self):
"""Returns True if the playlist is streamable.
For a playlist to be streamable, it needs at least 45 tracks by 15 different artists."""
if _extract(self._get_info_node(), "streamable") == '1':
return True
else:
return False
def has_track(self, track):
"""Checks to see if track is already in the playlist.
* track: Any Track object.
"""
return track in self.get_tracks()
def get_cover_image(self, size = COVER_EXTRA_LARGE):
"""
Returns a uri to the cover image
size can be one of:
COVER_MEGA
COVER_EXTRA_LARGE
COVER_LARGE
COVER_MEDIUM
COVER_SMALL
"""
return _extract(self._get_info_node(), "image")[size]
def get_url(self, domain_name = DOMAIN_ENGLISH):
"""Returns the url of the playlist on the network.
* domain_name: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
english_url = _extract(self._get_info_node(), "url")
appendix = english_url[english_url.rfind("/") + 1:]
return self.network._get_url(domain_name, "playlist") %{'appendix': appendix, "user": self.get_user().get_name()}
class Tag(_BaseObject):
"""A Last.fm object tag."""
# TODO: getWeeklyArtistChart (too lazy, i'll wait for when someone requests it)
name = None
def __init__(self, name, network):
_BaseObject.__init__(self, network)
self.name = name
def __repr__(self):
return "pylast.Tag(%s, %s)" %(repr(self.name), repr(self.network))
@_string_output
def __str__(self):
return self.get_name()
def __eq__(self, other):
return self.get_name().lower() == other.get_name().lower()
def __ne__(self, other):
return self.get_name().lower() != other.get_name().lower()
def _get_params(self):
return {'tag': self.get_name()}
def get_name(self, properly_capitalized=False):
"""Returns the name of the tag. """
if properly_capitalized:
self.name = _extract(self._request("tag.getInfo", True), "name")
return self.name
def get_similar(self):
"""Returns the tags similar to this one, ordered by similarity. """
doc = self._request('tag.getSimilar', True)
seq = []
names = _extract_all(doc, 'name')
for name in names:
seq.append(Tag(name, self.network))
return seq
def get_top_albums(self):
"""Retuns a list of the top albums."""
doc = self._request('tag.getTopAlbums', True)
seq = []
for node in doc.getElementsByTagName("album"):
name = _extract(node, "name")
artist = _extract(node, "name", 1)
playcount = _extract(node, "playcount")
seq.append(TopItem(Album(artist, name, self.network), playcount))
return seq
def get_top_tracks(self):
"""Returns a list of the most played Tracks by this artist."""
doc = self._request("tag.getTopTracks", True)
seq = []
for track in doc.getElementsByTagName('track'):
title = _extract(track, "name")
artist = _extract(track, "name", 1)
playcount = _number(_extract(track, "playcount"))
seq.append( TopItem(Track(artist, title, self.network), playcount) )
return seq
def get_top_artists(self):
"""Returns a sequence of the most played artists."""
doc = self._request('tag.getTopArtists', True)
seq = []
for node in doc.getElementsByTagName("artist"):
name = _extract(node, 'name')
playcount = _extract(node, "playcount")
seq.append(TopItem(Artist(name, self.network), playcount))
return seq
def get_weekly_chart_dates(self):
"""Returns a list of From and To tuples for the available charts."""
doc = self._request("tag.getWeeklyChartList", True)
seq = []
for node in doc.getElementsByTagName("chart"):
seq.append( (node.getAttribute("from"), node.getAttribute("to")) )
return seq
def get_weekly_artist_charts(self, from_date = None, to_date = None):
"""Returns the weekly artist charts for the week starting from the from_date value to the to_date value."""
params = self._get_params()
if from_date and to_date:
params["from"] = from_date
params["to"] = to_date
doc = self._request("tag.getWeeklyArtistChart", True, params)
seq = []
for node in doc.getElementsByTagName("artist"):
item = Artist(_extract(node, "name"), self.network)
weight = _number(_extract(node, "weight"))
seq.append(TopItem(item, weight))
return seq
def get_url(self, domain_name = DOMAIN_ENGLISH):
"""Returns the url of the tag page on the network.
* domain_name: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
name = _url_safe(self.get_name())
return self.network._get_url(domain_name, "tag") %{'name': name}
class Track(_BaseObject, _Taggable):
"""A Last.fm track."""
artist = None
title = None
def __init__(self, artist, title, network):
_BaseObject.__init__(self, network)
_Taggable.__init__(self, 'track')
if isinstance(artist, Artist):
self.artist = artist
else:
self.artist = Artist(artist, self.network)
self.title = title
def __repr__(self):
return "pylast.Track(%s, %s, %s)" %(repr(self.artist.name), repr(self.title), repr(self.network))
@_string_output
def __str__(self):
return self.get_artist().get_name() + ' - ' + self.get_title()
def __eq__(self, other):
return (self.get_title().lower() == other.get_title().lower()) and (self.get_artist().get_name().lower() == other.get_artist().get_name().lower())
def __ne__(self, other):
return (self.get_title().lower() != other.get_title().lower()) or (self.get_artist().get_name().lower() != other.get_artist().get_name().lower())
def _get_params(self):
return {'artist': self.get_artist().get_name(), 'track': self.get_title()}
def get_artist(self):
"""Returns the associated Artist object."""
return self.artist
def get_title(self, properly_capitalized=False):
"""Returns the track title."""
if properly_capitalized:
self.title = _extract(self._request("track.getInfo", True), "name")
return self.title
def get_name(self, properly_capitalized=False):
"""Returns the track title (alias to Track.get_title)."""
return self.get_title(properly_capitalized)
def get_id(self):
"""Returns the track id on the network."""
doc = self._request("track.getInfo", True)
return _extract(doc, "id")
def get_duration(self):
"""Returns the track duration."""
doc = self._request("track.getInfo", True)
return _number(_extract(doc, "duration"))
def get_mbid(self):
"""Returns the MusicBrainz ID of this track."""
doc = self._request("track.getInfo", True)
return _extract(doc, "mbid")
def get_listener_count(self):
"""Returns the listener count."""
if hasattr(self, "listener_count"):
return self.listener_count
else:
doc = self._request("track.getInfo", True)
self.listener_count = _number(_extract(doc, "listeners"))
return self.listener_count
def get_playcount(self):
"""Returns the play count."""
doc = self._request("track.getInfo", True)
return _number(_extract(doc, "playcount"))
def is_streamable(self):
"""Returns True if the track is available at Last.fm."""
doc = self._request("track.getInfo", True)
return _extract(doc, "streamable") == "1"
def is_fulltrack_available(self):
"""Returns True if the fulltrack is available for streaming."""
doc = self._request("track.getInfo", True)
return doc.getElementsByTagName("streamable")[0].getAttribute("fulltrack") == "1"
def get_album(self):
"""Returns the album object of this track."""
doc = self._request("track.getInfo", True)
albums = doc.getElementsByTagName("album")
if len(albums) == 0:
return
node = doc.getElementsByTagName("album")[0]
return Album(_extract(node, "artist"), _extract(node, "title"), self.network)
def get_wiki_published_date(self):
"""Returns the date of publishing this version of the wiki."""
doc = self._request("track.getInfo", True)
if len(doc.getElementsByTagName("wiki")) == 0:
return
node = doc.getElementsByTagName("wiki")[0]
return _extract(node, "published")
def get_wiki_summary(self):
"""Returns the summary of the wiki."""
doc = self._request("track.getInfo", True)
if len(doc.getElementsByTagName("wiki")) == 0:
return
node = doc.getElementsByTagName("wiki")[0]
return _extract(node, "summary")
def get_wiki_content(self):
"""Returns the content of the wiki."""
doc = self._request("track.getInfo", True)
if len(doc.getElementsByTagName("wiki")) == 0:
return
node = doc.getElementsByTagName("wiki")[0]
return _extract(node, "content")
def love(self):
"""Adds the track to the user's loved tracks. """
self._request('track.love')
def ban(self):
"""Ban this track from ever playing on the radio. """
self._request('track.ban')
def get_similar(self):
"""Returns similar tracks for this track on the network, based on listening data. """
doc = self._request('track.getSimilar', True)
seq = []
for node in doc.getElementsByTagName("track"):
title = _extract(node, 'name')
artist = _extract(node, 'name', 1)
match = _number(_extract(node, "match"))
seq.append(SimilarItem(Track(artist, title, self.network), match))
return seq
def get_top_fans(self, limit = None):
"""Returns a list of the Users who played this track."""
doc = self._request('track.getTopFans', True)
seq = []
elements = doc.getElementsByTagName('user')
for element in elements:
if limit and len(seq) >= limit:
break
name = _extract(element, 'name')
weight = _number(_extract(element, 'weight'))
seq.append(TopItem(User(name, self.network), weight))
return seq
def share(self, users, message = None):
"""Shares this track (sends out recommendations).
* users: A list that can contain usernames, emails, User objects, or all of them.
* message: A message to include in the recommendation message.
"""
#last.fm currently accepts a max of 10 recipients at a time
while(len(users) > 10):
section = users[0:9]
users = users[9:]
self.share(section, message)
nusers = []
for user in users:
if isinstance(user, User):
nusers.append(user.get_name())
else:
nusers.append(user)
params = self._get_params()
recipients = ','.join(nusers)
params['recipient'] = recipients
if message:
params['message'] = message
self._request('track.share', False, params)
def get_url(self, domain_name = DOMAIN_ENGLISH):
"""Returns the url of the track page on the network.
* domain_name: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
artist = _url_safe(self.get_artist().get_name())
title = _url_safe(self.get_title())
return self.network._get_url(domain_name, "track") %{'domain': self.network._get_language_domain(domain_name), 'artist': artist, 'title': title}
def get_shouts(self, limit=50):
"""
Returns a sequence of Shout objects
"""
shouts = []
for node in _collect_nodes(limit, self, "track.getShouts", False):
shouts.append(Shout(
_extract(node, "body"),
User(_extract(node, "author"), self.network),
_extract(node, "date")
)
)
return shouts
class Group(_BaseObject):
"""A Last.fm group."""
name = None
def __init__(self, group_name, network):
_BaseObject.__init__(self, network)
self.name = group_name
def __repr__(self):
return "pylast.Group(%s, %s)" %(repr(self.name), repr(self.network))
@_string_output
def __str__(self):
return self.get_name()
def __eq__(self, other):
return self.get_name().lower() == other.get_name().lower()
def __ne__(self, other):
return self.get_name().lower() != other.get_name().lower()
def _get_params(self):
return {'group': self.get_name()}
def get_name(self):
"""Returns the group name. """
return self.name
def get_weekly_chart_dates(self):
"""Returns a list of From and To tuples for the available charts."""
doc = self._request("group.getWeeklyChartList", True)
seq = []
for node in doc.getElementsByTagName("chart"):
seq.append( (node.getAttribute("from"), node.getAttribute("to")) )
return seq
def get_weekly_artist_charts(self, from_date = None, to_date = None):
"""Returns the weekly artist charts for the week starting from the from_date value to the to_date value."""
params = self._get_params()
if from_date and to_date:
params["from"] = from_date
params["to"] = to_date
doc = self._request("group.getWeeklyArtistChart", True, params)
seq = []
for node in doc.getElementsByTagName("artist"):
item = Artist(_extract(node, "name"), self.network)
weight = _number(_extract(node, "playcount"))
seq.append(TopItem(item, weight))
return seq
def get_weekly_album_charts(self, from_date = None, to_date = None):
"""Returns the weekly album charts for the week starting from the from_date value to the to_date value."""
params = self._get_params()
if from_date and to_date:
params["from"] = from_date
params["to"] = to_date
doc = self._request("group.getWeeklyAlbumChart", True, params)
seq = []
for node in doc.getElementsByTagName("album"):
item = Album(_extract(node, "artist"), _extract(node, "name"), self.network)
weight = _number(_extract(node, "playcount"))
seq.append(TopItem(item, weight))
return seq
def get_weekly_track_charts(self, from_date = None, to_date = None):
"""Returns the weekly track charts for the week starting from the from_date value to the to_date value."""
params = self._get_params()
if from_date and to_date:
params["from"] = from_date
params["to"] = to_date
doc = self._request("group.getWeeklyTrackChart", True, params)
seq = []
for node in doc.getElementsByTagName("track"):
item = Track(_extract(node, "artist"), _extract(node, "name"), self.network)
weight = _number(_extract(node, "playcount"))
seq.append(TopItem(item, weight))
return seq
def get_url(self, domain_name = DOMAIN_ENGLISH):
"""Returns the url of the group page on the network.
* domain_name: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
name = _url_safe(self.get_name())
return self.network._get_url(domain_name, "group") %{'name': name}
def get_members(self, limit=50):
"""
Returns a sequence of User objects
if limit==None it will return all
"""
nodes = _collect_nodes(limit, self, "group.getMembers", False)
users = []
for node in nodes:
users.append(User(_extract(node, "name"), self.network))
return users
class XSPF(_BaseObject):
"A Last.fm XSPF playlist."""
uri = None
def __init__(self, uri, network):
_BaseObject.__init__(self, network)
self.uri = uri
def _get_params(self):
return {'playlistURL': self.get_uri()}
@_string_output
def __str__(self):
return self.get_uri()
def __eq__(self, other):
return self.get_uri() == other.get_uri()
def __ne__(self, other):
return self.get_uri() != other.get_uri()
def get_uri(self):
"""Returns the Last.fm playlist URI. """
return self.uri
def get_tracks(self):
"""Returns the tracks on this playlist."""
doc = self._request('playlist.fetch', True)
seq = []
for n in doc.getElementsByTagName('track'):
title = _extract(n, 'title')
artist = _extract(n, 'creator')
seq.append(Track(artist, title, self.network))
return seq
class User(_BaseObject):
"""A Last.fm user."""
name = None
def __init__(self, user_name, network):
_BaseObject.__init__(self, network)
self.name = user_name
self._past_events_index = 0
self._recommended_events_index = 0
self._recommended_artists_index = 0
def __repr__(self):
return "pylast.User(%s, %s)" %(repr(self.name), repr(self.network))
@_string_output
def __str__(self):
return self.get_name()
def __eq__(self, another):
return self.get_name() == another.get_name()
def __ne__(self, another):
return self.get_name() != another.get_name()
def _get_params(self):
return {"user": self.get_name()}
def get_name(self, properly_capitalized=False):
"""Returns the nuser name."""
if properly_capitalized:
self.name = _extract(self._request("user.getInfo", True), "name")
return self.name
def get_upcoming_events(self):
"""Returns all the upcoming events for this user. """
doc = self._request('user.getEvents', True)
ids = _extract_all(doc, 'id')
events = []
for e_id in ids:
events.append(Event(e_id, self.network))
return events
def get_friends(self, limit = 50):
"""Returns a list of the user's friends. """
seq = []
for node in _collect_nodes(limit, self, "user.getFriends", False):
seq.append(User(_extract(node, "name"), self.network))
return seq
def get_loved_tracks(self, limit=50):
"""Returns this user's loved tracks as a sequence of LovedTrack objects
in reverse order of their timestamp, all the way back to the first track.
If limit==None, it will try to pull all the available data.
This method uses caching. Enable caching only if you're pulling a
large amount of data.
Use extract_items() with the return of this function to
get only a sequence of Track objects with no playback dates. """
params = self._get_params()
if limit:
params['limit'] = limit
seq = []
for track in _collect_nodes(limit, self, "user.getLovedTracks", True, params):
title = _extract(track, "name")
artist = _extract(track, "name", 1)
date = _extract(track, "date")
timestamp = track.getElementsByTagName("date")[0].getAttribute("uts")
seq.append(LovedTrack(Track(artist, title, self.network), date, timestamp))
return seq
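# Illustrative sketch, not part of the library: consuming the LovedTrack
# sequence returned above. The user name is a placeholder, get_user() is
# assumed to exist on the network object, the LovedTrack attribute names
# (track, date) are assumed from the constructor call above, and
# extract_items() is the module-level helper the docstring refers to.
#
#     loved = network.get_user("some_user").get_loved_tracks(limit=10)
#     for loved_track in loved:
#         print("%s (loved on %s)" % (loved_track.track, loved_track.date))
#     just_tracks = extract_items(loved)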
def get_neighbours(self, limit = 50):
"""Returns a list of the user's neighbours."""
params = self._get_params()
if limit:
params['limit'] = limit
doc = self._request('user.getNeighbours', True, params)
seq = []
names = _extract_all(doc, 'name')
for name in names:
seq.append(User(name, self.network))
return seq
def get_past_events(self, limit=50):
"""
Returns a sequence of Event objects
if limit==None it will return all
"""
seq = []
for n in _collect_nodes(limit, self, "user.getPastEvents", False):
seq.append(Event(_extract(n, "id"), self.network))
return seq
def get_playlists(self):
"""Returns a list of Playlists that this user owns."""
doc = self._request("user.getPlaylists", True)
playlists = []
for playlist_id in _extract_all(doc, "id"):
playlists.append(Playlist(self.get_name(), playlist_id, self.network))
return playlists
def get_now_playing(self):
"""Returns the currently playing track, or None if nothing is playing. """
params = self._get_params()
params['limit'] = '1'
doc = self._request('user.getRecentTracks', False, params)
e = doc.getElementsByTagName('track')[0]
if not e.hasAttribute('nowplaying'):
return None
artist = _extract(e, 'artist')
title = _extract(e, 'name')
return Track(artist, title, self.network)
def get_recent_tracks(self, limit = 10):
"""Returns this user's played tracks as a sequence of PlayedTrack objects
in reverse order of their playtime, all the way back to the first track.
If limit==None, it will try to pull all the available data.
This method uses caching. Enable caching only if you're pulling a
large amount of data.
Use extract_items() with the return of this function to
get only a sequence of Track objects with no playback dates. """
params = self._get_params()
if limit:
params['limit'] = limit
seq = []
for track in _collect_nodes(limit, self, "user.getRecentTracks", True, params):
if track.hasAttribute('nowplaying'):
continue #to prevent the now playing track from sneaking in here
title = _extract(track, "name")
artist = _extract(track, "artist")
date = _extract(track, "date")
timestamp = track.getElementsByTagName("date")[0].getAttribute("uts")
seq.append(PlayedTrack(Track(artist, title, self.network), date, timestamp))
return seq
def get_id(self):
"""Returns the user id."""
doc = self._request("user.getInfo", True)
return _extract(doc, "id")
def get_language(self):
"""Returns the language code of the language used by the user."""
doc = self._request("user.getInfo", True)
return _extract(doc, "lang")
def get_country(self):
"""Returns the name of the country of the user."""
doc = self._request("user.getInfo", True)
return Country(_extract(doc, "country"), self.network)
def get_age(self):
"""Returns the user's age."""
doc = self._request("user.getInfo", True)
return _number(_extract(doc, "age"))
def get_gender(self):
"""Returns the user's gender. Either USER_MALE or USER_FEMALE."""
doc = self._request("user.getInfo", True)
value = _extract(doc, "gender")
if value == 'm':
return USER_MALE
elif value == 'f':
return USER_FEMALE
return None
def is_subscriber(self):
"""Returns whether the user is a subscriber or not. True or False."""
doc = self._request("user.getInfo", True)
return _extract(doc, "subscriber") == "1"
def get_playcount(self):
"""Returns the user's playcount so far."""
doc = self._request("user.getInfo", True)
return _number(_extract(doc, "playcount"))
def get_top_albums(self, period = PERIOD_OVERALL):
"""Returns the top albums played by a user.
* period: The period of time. Possible values:
o PERIOD_OVERALL
o PERIOD_7DAYS
o PERIOD_3MONTHS
o PERIOD_6MONTHS
o PERIOD_12MONTHS
"""
params = self._get_params()
params['period'] = period
doc = self._request('user.getTopAlbums', True, params)
seq = []
for album in doc.getElementsByTagName('album'):
name = _extract(album, 'name')
artist = _extract(album, 'name', 1)
playcount = _extract(album, "playcount")
seq.append(TopItem(Album(artist, name, self.network), playcount))
return seq
def get_top_artists(self, period = PERIOD_OVERALL):
"""Returns the top artists played by a user.
* period: The period of time. Possible values:
o PERIOD_OVERALL
o PERIOD_7DAYS
o PERIOD_3MONTHS
o PERIOD_6MONTHS
o PERIOD_12MONTHS
"""
params = self._get_params()
params['period'] = period
doc = self._request('user.getTopArtists', True, params)
seq = []
for node in doc.getElementsByTagName('artist'):
name = _extract(node, 'name')
playcount = _extract(node, "playcount")
seq.append(TopItem(Artist(name, self.network), playcount))
return seq
def get_top_tags(self, limit=None):
"""Returns a sequence of the top tags used by this user with their counts as TopItem objects.
* limit: The limit of how many tags to return.
"""
doc = self._request("user.getTopTags", True)
seq = []
for node in doc.getElementsByTagName("tag"):
seq.append(TopItem(Tag(_extract(node, "name"), self.network), _extract(node, "count")))
if limit:
seq = seq[:limit]
return seq
def get_top_tracks(self, period = PERIOD_OVERALL):
"""Returns the top tracks played by a user.
* period: The period of time. Possible values:
o PERIOD_OVERALL
o PERIOD_7DAYS
o PERIOD_3MONTHS
o PERIOD_6MONTHS
o PERIOD_12MONTHS
"""
params = self._get_params()
params['period'] = period
doc = self._request('user.getTopTracks', True, params)
seq = []
for track in doc.getElementsByTagName('track'):
name = _extract(track, 'name')
artist = _extract(track, 'name', 1)
playcount = _extract(track, "playcount")
seq.append(TopItem(Track(artist, name, self.network), playcount))
return seq
def get_weekly_chart_dates(self):
"""Returns a list of From and To tuples for the available charts."""
doc = self._request("user.getWeeklyChartList", True)
seq = []
for node in doc.getElementsByTagName("chart"):
seq.append( (node.getAttribute("from"), node.getAttribute("to")) )
return seq
def get_weekly_artist_charts(self, from_date = None, to_date = None):
"""Returns the weekly artist charts for the week starting from the from_date value to the to_date value."""
params = self._get_params()
if from_date and to_date:
params["from"] = from_date
params["to"] = to_date
doc = self._request("user.getWeeklyArtistChart", True, params)
seq = []
for node in doc.getElementsByTagName("artist"):
item = Artist(_extract(node, "name"), self.network)
weight = _number(_extract(node, "playcount"))
seq.append(TopItem(item, weight))
return seq
def get_weekly_album_charts(self, from_date = None, to_date = None):
"""Returns the weekly album charts for the week starting from the from_date value to the to_date value."""
params = self._get_params()
if from_date and to_date:
params["from"] = from_date
params["to"] = to_date
doc = self._request("user.getWeeklyAlbumChart", True, params)
seq = []
for node in doc.getElementsByTagName("album"):
item = Album(_extract(node, "artist"), _extract(node, "name"), self.network)
weight = _number(_extract(node, "playcount"))
seq.append(TopItem(item, weight))
return seq
def get_weekly_track_charts(self, from_date = None, to_date = None):
"""Returns the weekly track charts for the week starting from the from_date value to the to_date value."""
params = self._get_params()
if from_date and to_date:
params["from"] = from_date
params["to"] = to_date
doc = self._request("user.getWeeklyTrackChart", True, params)
seq = []
for node in doc.getElementsByTagName("track"):
item = Track(_extract(node, "artist"), _extract(node, "name"), self.network)
weight = _number(_extract(node, "playcount"))
seq.append(TopItem(item, weight))
return seq
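# Illustrative sketch, not part of the library: pairing the chart ranges from
# get_weekly_chart_dates() with the weekly chart getters defined above. The
# user name is a placeholder and get_user() is assumed to exist on the network.
#
#     user = network.get_user("some_user")
#     for from_date, to_date in user.get_weekly_chart_dates():
#         for chart_item in user.get_weekly_artist_charts(from_date, to_date):
#             print("%s: %s plays" % (chart_item.item, chart_item.weight))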
def compare_with_user(self, user, shared_artists_limit = None):
"""Compare this user with another Last.fm user.
Returns a sequence (tasteometer_score, (shared_artist1, shared_artist2, ...))
user: A User object or a username string/unicode object.
"""
if isinstance(user, User):
user = user.get_name()
params = self._get_params()
if shared_artists_limit:
params['limit'] = shared_artists_limit
params['type1'] = 'user'
params['type2'] = 'user'
params['value1'] = self.get_name()
params['value2'] = user
doc = self._request('tasteometer.compare', False, params)
score = _extract(doc, 'score')
artists = doc.getElementsByTagName('artists')[0]
shared_artists_names = _extract_all(artists, 'name')
shared_artists_seq = []
for name in shared_artists_names:
shared_artists_seq.append(Artist(name, self.network))
return (score, shared_artists_seq)
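# Illustrative sketch, not part of the library: a tasteometer comparison
# between two users. The user names are placeholders; the score is returned as
# a string extracted from the response, hence the float() conversion here.
#
#     score, shared_artists = network.get_user("alice").compare_with_user("bob")
#     print("similarity: %.2f" % float(score))
#     for artist in shared_artists:
#         print(artist)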
def get_image(self):
"""Returns the user's avatar."""
doc = self._request("user.getInfo", True)
return _extract(doc, "image")
def get_url(self, domain_name = DOMAIN_ENGLISH):
"""Returns the url of the user page on the network.
* domain_name: The network's language domain. Possible values:
o DOMAIN_ENGLISH
o DOMAIN_GERMAN
o DOMAIN_SPANISH
o DOMAIN_FRENCH
o DOMAIN_ITALIAN
o DOMAIN_POLISH
o DOMAIN_PORTUGUESE
o DOMAIN_SWEDISH
o DOMAIN_TURKISH
o DOMAIN_RUSSIAN
o DOMAIN_JAPANESE
o DOMAIN_CHINESE
"""
name = _url_safe(self.get_name())
return self.network._get_url(domain_name, "user") %{'name': name}
def get_library(self):
"""Returns the associated Library object. """
return Library(self, self.network)
def get_shouts(self, limit=50):
"""
Returns a sequence of Shout objects
"""
shouts = []
for node in _collect_nodes(limit, self, "user.getShouts", False):
shouts.append(Shout(
_extract(node, "body"),
User(_extract(node, "author"), self.network),
_extract(node, "date")
)
)
return shouts
def shout(self, message):
"""
Post a shout
"""
params = self._get_params()
params["message"] = message
self._request("user.Shout", False, params)
class AuthenticatedUser(User):
def __init__(self, network):
User.__init__(self, "", network);
def _get_params(self):
return {"user": self.get_name()}
def get_name(self):
"""Returns the name of the authenticated user."""
doc = self._request("user.getInfo", True, {"user": ""}) # hack
self.name = _extract(doc, "name")
return self.name
def get_recommended_events(self, limit=50):
"""
Returns a sequence of Event objects
if limit==None it will return all
"""
seq = []
for node in _collect_nodes(limit, self, "user.getRecommendedEvents", False):
seq.append(Event(_extract(node, "id"), self.network))
return seq
def get_recommended_artists(self, limit=50):
"""
Returns a sequence of Artist objects
if limit==None it will return all
"""
seq = []
for node in _collect_nodes(limit, self, "user.getRecommendedArtists", False):
seq.append(Artist(_extract(node, "name"), self.network))
return seq
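# Illustrative sketch, not part of the library: using AuthenticatedUser with a
# network that already carries a session key. The constructor keyword arguments
# are assumptions for the example; obtaining the session key is out of scope.
#
#     network = pylast.LastFMNetwork(api_key=API_KEY, api_secret=API_SECRET,
#                                    session_key=SESSION_KEY)
#     me = AuthenticatedUser(network)
#     print(me.get_name())
#     for event in me.get_recommended_events(limit=5):
#         print(event)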
class _Search(_BaseObject):
"""An abstract class. Use one of its derivatives."""
def __init__(self, ws_prefix, search_terms, network):
_BaseObject.__init__(self, network)
self._ws_prefix = ws_prefix
self.search_terms = search_terms
self._last_page_index = 0
def _get_params(self):
params = {}
for key in self.search_terms.keys():
params[key] = self.search_terms[key]
return params
def get_total_result_count(self):
"""Returns the total count of all the results."""
doc = self._request(self._ws_prefix + ".search", True)
return _extract(doc, "opensearch:totalResults")
def _retreive_page(self, page_index):
"""Returns the node of matches to be processed"""
params = self._get_params()
params["page"] = str(page_index)
doc = self._request(self._ws_prefix + ".search", True, params)
return doc.getElementsByTagName(self._ws_prefix + "matches")[0]
def _retrieve_next_page(self):
self._last_page_index += 1
return self._retreive_page(self._last_page_index)
class AlbumSearch(_Search):
"""Search for an album by name."""
def __init__(self, album_name, network):
_Search.__init__(self, "album", {"album": album_name}, network)
def get_next_page(self):
"""Returns the next page of results as a sequence of Album objects."""
master_node = self._retrieve_next_page()
seq = []
for node in master_node.getElementsByTagName("album"):
seq.append(Album(_extract(node, "artist"), _extract(node, "name"), self.network))
return seq
class ArtistSearch(_Search):
"""Search for an artist by artist name."""
def __init__(self, artist_name, network):
_Search.__init__(self, "artist", {"artist": artist_name}, network)
def get_next_page(self):
"""Returns the next page of results as a sequence of Artist objects."""
master_node = self._retrieve_next_page()
seq = []
for node in master_node.getElementsByTagName("artist"):
artist = Artist(_extract(node, "name"), self.network)
artist.listener_count = _number(_extract(node, "listeners"))
seq.append(artist)
return seq
class TagSearch(_Search):
"""Search for a tag by tag name."""
def __init__(self, tag_name, network):
_Search.__init__(self, "tag", {"tag": tag_name}, network)
def get_next_page(self):
"""Returns the next page of results as a sequence of Tag objects."""
master_node = self._retrieve_next_page()
seq = []
for node in master_node.getElementsByTagName("tag"):
tag = Tag(_extract(node, "name"), self.network)
tag.tag_count = _number(_extract(node, "count"))
seq.append(tag)
return seq
class TrackSearch(_Search):
"""Search for a track by track title. If you do not want to narrow the results
down by specifying the artist name, set it to an empty string."""
def __init__(self, artist_name, track_title, network):
_Search.__init__(self, "track", {"track": track_title, "artist": artist_name}, network)
def get_next_page(self):
"""Returns the next page of results as a sequence of Track objects."""
master_node = self._retrieve_next_page()
seq = []
for node in master_node.getElementsByTagName("track"):
track = Track(_extract(node, "artist"), _extract(node, "name"), self.network)
track.listener_count = _number(_extract(node, "listeners"))
seq.append(track)
return seq
class VenueSearch(_Search):
"""Search for a venue by its name. If you do not want to narrow the results
down by specifying a country, set it to an empty string."""
def __init__(self, venue_name, country_name, network):
_Search.__init__(self, "venue", {"venue": venue_name, "country": country_name}, network)
def get_next_page(self):
"""Returns the next page of results as a sequence of Venue objects."""
master_node = self._retrieve_next_page()
seq = []
for node in master_node.getElementsByTagName("venue"):
seq.append(Venue(_extract(node, "id"), self.network))
return seq
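# Illustrative sketch, not part of the library: paginating through search
# results. Each get_next_page() call advances _last_page_index and returns the
# next batch; stopping on an empty page is an assumption about the service.
#
#     search = TrackSearch("cher", "believe", network)  # artist filter may be ""
#     print("total results: %s" % search.get_total_result_count())
#     page = search.get_next_page()
#     while page:
#         for track in page:
#             print("%s (%s listeners)" % (track, track.listener_count))
#         page = search.get_next_page()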
class Venue(_BaseObject):
"""A venue where events are held."""
# TODO: waiting for a venue.getInfo web service to use.
id = None
def __init__(self, id, network):
_BaseObject.__init__(self, network)
self.id = _number(id)
def __repr__(self):
return "pylast.Venue(%s, %s)" %(repr(self.id), repr(self.network))
@_string_output
def __str__(self):
return "Venue #" + str(self.id)
def __eq__(self, other):
return self.get_id() == other.get_id()
def _get_params(self):
return {"venue": self.get_id()}
def get_id(self):
"""Returns the id of the venue."""
return self.id
def get_upcoming_events(self):
"""Returns the upcoming events in this venue."""
doc = self._request("venue.getEvents", True)
seq = []
for node in doc.getElementsByTagName("event"):
seq.append(Event(_extract(node, "id"), self.network))
return seq
def get_past_events(self):
"""Returns the past events held in this venue."""
doc = self._request("venue.getEvents", True)
seq = []
for node in doc.getElementsByTagName("event"):
seq.append(Event(_extract(node, "id"), self.network))
return seq
def md5(text):
"""Returns the md5 hash of a string."""
h = hashlib.md5()
h.update(_unicode(text).encode("utf-8"))
return h.hexdigest()
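# Illustrative note for md5() above: input is normalised to a UTF-8 encoded
# byte string before hashing, so both str and unicode input are accepted. For
# example, the empty string yields the well-known digest below.
#
#     md5("")   # -> 'd41d8cd98f00b204e9800998ecf8427e'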
def _unicode(text):
if sys.version_info[0] == 3:
if type(text) in (bytes, bytearray):
return str(text, "utf-8")
elif type(text) == str:
return text
else:
return str(text)
elif sys.version_info[0] == 2:
if type(text) in (str,):
return unicode(text, "utf-8")
elif type(text) == unicode:
return text
else:
return unicode(text)
def _string(text):
"""For Python2 routines that can only process str type."""
if sys.version_info[0] == 3:
if type(text) != str:
return str(text)
else:
return text
elif sys.version_info[0] == 2:
if type(text) == str:
return text
if type(text) == int:
return str(text)
|
#!/usr/bin/python -u
#
# This is the API builder, it parses the C sources and build the
# API formal description in XML.
#
# See Copyright for the status of this software.
#
# daniel@veillard.com
#
import os, sys
import string
import glob
import re
quiet=True
warnings=0
debug=False
debugsym=None
#
# C parser analysis code
#
included_files = {
"libvirt.h": "header with general libvirt API definitions",
"virterror.h": "header with error specific API definitions",
"libvirt.c": "Main interfaces for the libvirt library",
"virterror.c": "implements error handling and reporting code for libvirt",
"event.c": "event loop for monitoring file handles",
}
qemu_included_files = {
"libvirt-qemu.h": "header with QEMU specific API definitions",
"libvirt-qemu.c": "Implementations for the QEMU specific APIs",
}
lxc_included_files = {
"libvirt-lxc.h": "header with LXC specific API definitions",
"libvirt-lxc.c": "Implementations for the LXC specific APIs",
}
ignored_words = {
"ATTRIBUTE_UNUSED": (0, "macro keyword"),
"ATTRIBUTE_SENTINEL": (0, "macro keyword"),
"VIR_DEPRECATED": (0, "macro keyword"),
"VIR_EXPORT_VAR": (0, "macro keyword"),
"WINAPI": (0, "Windows keyword"),
"__declspec": (3, "Windows keyword"),
"__stdcall": (0, "Windows keyword"),
}
ignored_functions = {
"virDomainMigrateFinish": "private function for migration",
"virDomainMigrateFinish2": "private function for migration",
"virDomainMigratePerform": "private function for migration",
"virDomainMigratePrepare": "private function for migration",
"virDomainMigratePrepare2": "private function for migration",
"virDomainMigratePrepareTunnel": "private function for tunnelled migration",
"virDomainMigrateBegin3": "private function for migration",
"virDomainMigrateFinish3": "private function for migration",
"virDomainMigratePerform3": "private function for migration",
"virDomainMigratePrepare3": "private function for migration",
"virDomainMigrateConfirm3": "private function for migration",
"virDomainMigratePrepareTunnel3": "private function for tunnelled migration",
"virDrvSupportsFeature": "private function for remote access",
"DllMain": "specific function for Win32",
"virEventAddHandle": "internal function in event.c",
"virEventUpdateHandle": "internal function in event.c",
"virEventRemoveHandle": "internal function in event.c",
"virEventAddTimeout": "internal function in event.c",
"virEventUpdateTimeout": "internal function in event.c",
"virEventRemoveTimeout": "internal function in event.c",
}
ignored_macros = {
"_virSchedParameter": "backward compatibility macro for virTypedParameter",
"_virBlkioParameter": "backward compatibility macro for virTypedParameter",
"_virMemoryParameter": "backward compatibility macro for virTypedParameter",
}
def escape(raw):
raw = string.replace(raw, '&', '&amp;')
raw = string.replace(raw, '<', '&lt;')
raw = string.replace(raw, '>', '&gt;')
raw = string.replace(raw, "'", '&apos;')
raw = string.replace(raw, '"', '&quot;')
return raw
def uniq(items):
d = {}
for item in items:
d[item]=1
k = d.keys()
k.sort()
return k
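# Illustrative note, not part of the builder: what escape() and uniq() return
# for small inputs, given the standard XML entity replacements above.
#
#     escape('a < b & "c"')    # -> 'a &lt; b &amp; &quot;c&quot;'
#     uniq(['b', 'a', 'b'])    # -> ['a', 'b']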
class identifier:
def __init__(self, name, header=None, module=None, type=None, lineno = 0,
info=None, extra=None, conditionals = None):
self.name = name
self.header = header
self.module = module
self.type = type
self.info = info
self.extra = extra
self.lineno = lineno
self.static = 0
if conditionals == None or len(conditionals) == 0:
self.conditionals = None
else:
self.conditionals = conditionals[:]
if self.name == debugsym and not quiet:
print "=> define %s : %s" % (debugsym, (module, type, info,
extra, conditionals))
def __repr__(self):
r = "%s %s:" % (self.type, self.name)
if self.static:
r = r + " static"
if self.module != None:
r = r + " from %s" % (self.module)
if self.info != None:
r = r + " " + `self.info`
if self.extra != None:
r = r + " " + `self.extra`
if self.conditionals != None:
r = r + " " + `self.conditionals`
return r
def set_header(self, header):
self.header = header
def set_module(self, module):
self.module = module
def set_type(self, type):
self.type = type
def set_info(self, info):
self.info = info
def set_extra(self, extra):
self.extra = extra
def set_lineno(self, lineno):
self.lineno = lineno
def set_static(self, static):
self.static = static
def set_conditionals(self, conditionals):
if conditionals == None or len(conditionals) == 0:
self.conditionals = None
else:
self.conditionals = conditionals[:]
def get_name(self):
return self.name
def get_header(self):
return self.module
def get_module(self):
return self.module
def get_type(self):
return self.type
def get_info(self):
return self.info
def get_lineno(self):
return self.lineno
def get_extra(self):
return self.extra
def get_static(self):
return self.static
def get_conditionals(self):
return self.conditionals
def update(self, header, module, type = None, info = None, extra=None,
conditionals=None):
if self.name == debugsym and not quiet:
print "=> update %s : %s" % (debugsym, (module, type, info,
extra, conditionals))
if header != None and self.header == None:
self.set_header(module)
if module != None and (self.module == None or self.header == self.module):
self.set_module(module)
if type != None and self.type == None:
self.set_type(type)
if info != None:
self.set_info(info)
if extra != None:
self.set_extra(extra)
if conditionals != None:
self.set_conditionals(conditionals)
class index:
def __init__(self, name = "noname"):
self.name = name
self.identifiers = {}
self.functions = {}
self.variables = {}
self.includes = {}
self.structs = {}
self.unions = {}
self.enums = {}
self.typedefs = {}
self.macros = {}
self.references = {}
self.info = {}
def add_ref(self, name, header, module, static, type, lineno, info=None, extra=None, conditionals = None):
if name[0:2] == '__':
return None
d = None
try:
d = self.identifiers[name]
d.update(header, module, type, info, extra, conditionals)
except:
d = identifier(name, header, module, type, lineno, info, extra, conditionals)
self.identifiers[name] = d
if d != None and static == 1:
d.set_static(1)
if d != None and name != None and type != None:
self.references[name] = d
if name == debugsym and not quiet:
print "New ref: %s" % (d)
return d
def add(self, name, header, module, static, type, lineno, info=None, extra=None, conditionals = None):
if name[0:2] == '__':
return None
d = None
try:
d = self.identifiers[name]
d.update(header, module, type, info, extra, conditionals)
except:
d = identifier(name, header, module, type, lineno, info, extra, conditionals)
self.identifiers[name] = d
if d != None and static == 1:
d.set_static(1)
if d != None and name != None and type != None:
if type == "function":
self.functions[name] = d
elif type == "functype":
self.functions[name] = d
elif type == "variable":
self.variables[name] = d
elif type == "include":
self.includes[name] = d
elif type == "struct":
self.structs[name] = d
elif type == "union":
self.unions[name] = d
elif type == "enum":
self.enums[name] = d
elif type == "typedef":
self.typedefs[name] = d
elif type == "macro":
self.macros[name] = d
else:
self.warning("Unable to register type ", type)
if name == debugsym and not quiet:
print "New symbol: %s" % (d)
return d
def merge(self, idx):
for id in idx.functions.keys():
#
# macro might be used to override functions or variables
# definitions
#
if self.macros.has_key(id):
del self.macros[id]
if self.functions.has_key(id):
self.warning("function %s from %s redeclared in %s" % (
id, self.functions[id].header, idx.functions[id].header))
else:
self.functions[id] = idx.functions[id]
self.identifiers[id] = idx.functions[id]
for id in idx.variables.keys():
#
# macro might be used to override functions or variables
# definitions
#
if self.macros.has_key(id):
del self.macros[id]
if self.variables.has_key(id):
self.warning("variable %s from %s redeclared in %s" % (
id, self.variables[id].header, idx.variables[id].header))
else:
self.variables[id] = idx.variables[id]
self.identifiers[id] = idx.variables[id]
for id in idx.structs.keys():
if self.structs.has_key(id):
self.warning("struct %s from %s redeclared in %s" % (
id, self.structs[id].header, idx.structs[id].header))
else:
self.structs[id] = idx.structs[id]
self.identifiers[id] = idx.structs[id]
for id in idx.unions.keys():
if self.unions.has_key(id):
print "union %s from %s redeclared in %s" % (
id, self.unions[id].header, idx.unions[id].header)
else:
self.unions[id] = idx.unions[id]
self.identifiers[id] = idx.unions[id]
for id in idx.typedefs.keys():
if self.typedefs.has_key(id):
self.warning("typedef %s from %s redeclared in %s" % (
id, self.typedefs[id].header, idx.typedefs[id].header))
else:
self.typedefs[id] = idx.typedefs[id]
self.identifiers[id] = idx.typedefs[id]
for id in idx.macros.keys():
#
# macro might be used to override functions or variables
# definitions
#
if self.variables.has_key(id):
continue
if self.functions.has_key(id):
continue
if self.enums.has_key(id):
continue
if self.macros.has_key(id):
self.warning("macro %s from %s redeclared in %s" % (
id, self.macros[id].header, idx.macros[id].header))
else:
self.macros[id] = idx.macros[id]
self.identifiers[id] = idx.macros[id]
for id in idx.enums.keys():
if self.enums.has_key(id):
self.warning("enum %s from %s redeclared in %s" % (
id, self.enums[id].header, idx.enums[id].header))
else:
self.enums[id] = idx.enums[id]
self.identifiers[id] = idx.enums[id]
def merge_public(self, idx):
for id in idx.functions.keys():
if self.functions.has_key(id):
# check that function condition agrees with header
if idx.functions[id].conditionals != \
self.functions[id].conditionals:
self.warning("Header condition differs from Function for %s:" \
% id)
self.warning(" H: %s" % self.functions[id].conditionals)
self.warning(" C: %s" % idx.functions[id].conditionals)
up = idx.functions[id]
self.functions[id].update(None, up.module, up.type, up.info, up.extra)
# else:
# print "Function %s from %s is not declared in headers" % (
# id, idx.functions[id].module)
# TODO: do the same for variables.
def analyze_dict(self, type, dict):
count = 0
public = 0
for name in dict.keys():
id = dict[name]
count = count + 1
if id.static == 0:
public = public + 1
if count != public:
print " %d %s , %d public" % (count, type, public)
elif count != 0:
print " %d public %s" % (count, type)
def analyze(self):
if not quiet:
self.analyze_dict("functions", self.functions)
self.analyze_dict("variables", self.variables)
self.analyze_dict("structs", self.structs)
self.analyze_dict("unions", self.unions)
self.analyze_dict("typedefs", self.typedefs)
self.analyze_dict("macros", self.macros)
class CLexer:
"""A lexer for the C language; it tokenizes the input by reading and
analyzing it line by line"""
def __init__(self, input):
self.input = input
self.tokens = []
self.line = ""
self.lineno = 0
def getline(self):
line = ''
while line == '':
line = self.input.readline()
if not line:
return None
self.lineno = self.lineno + 1
line = string.lstrip(line)
line = string.rstrip(line)
if line == '':
continue
while line[-1] == '\\':
line = line[:-1]
n = self.input.readline()
self.lineno = self.lineno + 1
n = string.lstrip(n)
n = string.rstrip(n)
if not n:
break
else:
line = line + n
return line
def getlineno(self):
return self.lineno
def push(self, token):
self.tokens.insert(0, token);
def debug(self):
print "Last token: ", self.last
print "Token queue: ", self.tokens
print "Line %d end: " % (self.lineno), self.line
def token(self):
while self.tokens == []:
if self.line == "":
line = self.getline()
else:
line = self.line
self.line = ""
if line == None:
return None
if line[0] == '#':
self.tokens = map((lambda x: ('preproc', x)),
string.split(line))
break;
l = len(line)
if line[0] == '"' or line[0] == "'":
end = line[0]
line = line[1:]
found = 0
tok = ""
while found == 0:
i = 0
l = len(line)
while i < l:
if line[i] == end:
self.line = line[i+1:]
line = line[:i]
l = i
found = 1
break
if line[i] == '\\':
i = i + 1
i = i + 1
tok = tok + line
if found == 0:
line = self.getline()
if line == None:
return None
self.last = ('string', tok)
return self.last
if l >= 2 and line[0] == '/' and line[1] == '*':
line = line[2:]
found = 0
tok = ""
while found == 0:
i = 0
l = len(line)
while i < l:
if line[i] == '*' and i+1 < l and line[i+1] == '/':
self.line = line[i+2:]
line = line[:i-1]
l = i
found = 1
break
i = i + 1
if tok != "":
tok = tok + "\n"
tok = tok + line
if found == 0:
line = self.getline()
if line == None:
return None
self.last = ('comment', tok)
return self.last
if l >= 2 and line[0] == '/' and line[1] == '/':
line = line[2:]
self.last = ('comment', line)
return self.last
i = 0
while i < l:
if line[i] == '/' and i+1 < l and line[i+1] == '/':
self.line = line[i:]
line = line[:i]
break
if line[i] == '/' and i+1 < l and line[i+1] == '*':
self.line = line[i:]
line = line[:i]
break
if line[i] == '"' or line[i] == "'":
self.line = line[i:]
line = line[:i]
break
i = i + 1
l = len(line)
i = 0
while i < l:
if line[i] == ' ' or line[i] == '\t':
i = i + 1
continue
o = ord(line[i])
if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
(o >= 48 and o <= 57):
s = i
while i < l:
o = ord(line[i])
if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
(o >= 48 and o <= 57) or string.find(
" \t(){}:;,+-*/%&!|[]=><", line[i]) == -1:
i = i + 1
else:
break
self.tokens.append(('name', line[s:i]))
continue
if string.find("(){}:;,[]", line[i]) != -1:
# if line[i] == '(' or line[i] == ')' or line[i] == '{' or \
# line[i] == '}' or line[i] == ':' or line[i] == ';' or \
# line[i] == ',' or line[i] == '[' or line[i] == ']':
self.tokens.append(('sep', line[i]))
i = i + 1
continue
if string.find("+-*><=/%&!|.", line[i]) != -1:
# if line[i] == '+' or line[i] == '-' or line[i] == '*' or \
# line[i] == '>' or line[i] == '<' or line[i] == '=' or \
# line[i] == '/' or line[i] == '%' or line[i] == '&' or \
# line[i] == '!' or line[i] == '|' or line[i] == '.':
if line[i] == '.' and i + 2 < l and \
line[i+1] == '.' and line[i+2] == '.':
self.tokens.append(('name', '...'))
i = i + 3
continue
j = i + 1
if j < l and (
string.find("+-*><=/%&!|", line[j]) != -1):
# line[j] == '+' or line[j] == '-' or line[j] == '*' or \
# line[j] == '>' or line[j] == '<' or line[j] == '=' or \
# line[j] == '/' or line[j] == '%' or line[j] == '&' or \
# line[j] == '!' or line[j] == '|'):
self.tokens.append(('op', line[i:j+1]))
i = j + 1
else:
self.tokens.append(('op', line[i]))
i = i + 1
continue
s = i
while i < l:
o = ord(line[i])
if (o >= 97 and o <= 122) or (o >= 65 and o <= 90) or \
(o >= 48 and o <= 57) or (
string.find(" \t(){}:;,+-*/%&!|[]=><", line[i]) == -1):
# line[i] != ' ' and line[i] != '\t' and
# line[i] != '(' and line[i] != ')' and
# line[i] != '{' and line[i] != '}' and
# line[i] != ':' and line[i] != ';' and
# line[i] != ',' and line[i] != '+' and
# line[i] != '-' and line[i] != '*' and
# line[i] != '/' and line[i] != '%' and
# line[i] != '&' and line[i] != '!' and
# line[i] != '|' and line[i] != '[' and
# line[i] != ']' and line[i] != '=' and
# line[i] != '*' and line[i] != '>' and
# line[i] != '<'):
i = i + 1
else:
break
self.tokens.append(('name', line[s:i]))
tok = self.tokens[0]
self.tokens = self.tokens[1:]
self.last = tok
return tok
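# Illustrative sketch, not part of the builder: driving CLexer directly on an
# in-memory buffer. CLexer only needs an object with a readline() method, so a
# StringIO instance works; the C declaration below is an arbitrary example.
#
#     from StringIO import StringIO
#     lexer = CLexer(StringIO("int a = 1; /* answer */\n"))
#     tok = lexer.token()
#     while tok is not None:
#         print tok    # e.g. ('name', 'int'), ('name', 'a'), ('op', '='), ...
#         tok = lexer.token()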
class CParser:
"""The C module parser"""
def __init__(self, filename, idx = None):
self.filename = filename
if len(filename) > 2 and filename[-2:] == '.h':
self.is_header = 1
else:
self.is_header = 0
self.input = open(filename)
self.lexer = CLexer(self.input)
if idx == None:
self.index = index()
else:
self.index = idx
self.top_comment = ""
self.last_comment = ""
self.comment = None
self.collect_ref = 0
self.no_error = 0
self.conditionals = []
self.defines = []
def collect_references(self):
self.collect_ref = 1
def stop_error(self):
self.no_error = 1
def start_error(self):
self.no_error = 0
def lineno(self):
return self.lexer.getlineno()
def index_add(self, name, module, static, type, info=None, extra = None):
if self.is_header == 1:
self.index.add(name, module, module, static, type, self.lineno(),
info, extra, self.conditionals)
else:
self.index.add(name, None, module, static, type, self.lineno(),
info, extra, self.conditionals)
def index_add_ref(self, name, module, static, type, info=None,
extra = None):
if self.is_header == 1:
self.index.add_ref(name, module, module, static, type,
self.lineno(), info, extra, self.conditionals)
else:
self.index.add_ref(name, None, module, static, type, self.lineno(),
info, extra, self.conditionals)
def warning(self, msg):
global warnings
warnings = warnings + 1
if self.no_error:
return
print msg
def error(self, msg, token=-1):
if self.no_error:
return
print "Parse Error: " + msg
if token != -1:
print "Got token ", token
self.lexer.debug()
sys.exit(1)
def debug(self, msg, token=-1):
print "Debug: " + msg
if token != -1:
print "Got token ", token
self.lexer.debug()
def parseTopComment(self, comment):
res = {}
lines = string.split(comment, "\n")
item = None
for line in lines:
line = line.lstrip().lstrip('*').lstrip()
m = re.match('([_.a-zA-Z0-9]+):(.*)', line)
if m:
item = m.group(1)
line = m.group(2).lstrip()
if item:
if res.has_key(item):
res[item] = res[item] + " " + line
else:
res[item] = line
self.index.info = res
def strip_lead_star(self, line):
l = len(line)
i = 0
while i < l:
if line[i] == ' ' or line[i] == '\t':
i += 1
elif line[i] == '*':
return line[:i] + line[i + 1:]
else:
return line
return line
def cleanupComment(self):
if type(self.comment) != type(""):
return
# remove the leading * on multi-line comments
lines = self.comment.splitlines(True)
com = ""
for line in lines:
com = com + self.strip_lead_star(line)
self.comment = com.strip()
def parseComment(self, token):
com = token[1]
if self.top_comment == "":
self.top_comment = com
if self.comment == None or com[0] == '*':
self.comment = com;
else:
self.comment = self.comment + com
token = self.lexer.token()
if string.find(self.comment, "DOC_DISABLE") != -1:
self.stop_error()
if string.find(self.comment, "DOC_ENABLE") != -1:
self.start_error()
return token
#
# Parse a comment block associated with a typedef
#
def parseTypeComment(self, name, quiet = 0):
if name[0:2] == '__':
quiet = 1
args = []
desc = ""
if self.comment == None:
if not quiet:
self.warning("Missing comment for type %s" % (name))
return((args, desc))
if self.comment[0] != '*':
if not quiet:
self.warning("Missing * in type comment for %s" % (name))
return((args, desc))
lines = string.split(self.comment, '\n')
if lines[0] == '*':
del lines[0]
if lines[0] != "* %s:" % (name):
if not quiet:
self.warning("Misformatted type comment for %s" % (name))
self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
return((args, desc))
del lines[0]
while len(lines) > 0 and lines[0] == '*':
del lines[0]
desc = ""
while len(lines) > 0:
l = lines[0]
while len(l) > 0 and l[0] == '*':
l = l[1:]
l = string.strip(l)
desc = desc + " " + l
del lines[0]
desc = string.strip(desc)
if quiet == 0:
if desc == "":
self.warning("Type comment for %s lacks a description of the type" % (name))
return(desc)
#
# Parse a comment block associated with a macro
#
def parseMacroComment(self, name, quiet = 0):
global ignored_macros
if name[0:2] == '__':
quiet = 1
if ignored_macros.has_key(name):
quiet = 1
args = []
desc = ""
if self.comment == None:
if not quiet:
self.warning("Missing comment for macro %s" % (name))
return((args, desc))
if self.comment[0] != '*':
if not quiet:
self.warning("Missing * in macro comment for %s" % (name))
return((args, desc))
lines = string.split(self.comment, '\n')
if lines[0] == '*':
del lines[0]
if lines[0] != "* %s:" % (name):
if not quiet:
self.warning("Misformatted macro comment for %s" % (name))
self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
return((args, desc))
del lines[0]
while lines[0] == '*':
del lines[0]
while len(lines) > 0 and lines[0][0:3] == '* @':
l = lines[0][3:]
try:
(arg, desc) = string.split(l, ':', 1)
desc=string.strip(desc)
arg=string.strip(arg)
except:
if not quiet:
self.warning("Misformatted macro comment for %s" % (name))
self.warning(" problem with '%s'" % (lines[0]))
del lines[0]
continue
del lines[0]
l = string.strip(lines[0])
while len(l) > 2 and l[0:3] != '* @':
while l[0] == '*':
l = l[1:]
desc = desc + ' ' + string.strip(l)
del lines[0]
if len(lines) == 0:
break
l = lines[0]
args.append((arg, desc))
while len(lines) > 0 and lines[0] == '*':
del lines[0]
desc = ""
while len(lines) > 0:
l = lines[0]
while len(l) > 0 and l[0] == '*':
l = l[1:]
l = string.strip(l)
desc = desc + " " + l
del lines[0]
desc = string.strip(desc)
if quiet == 0:
if desc == "":
self.warning("Macro comment for %s lacks a description of the macro" % (name))
return((args, desc))
#
# Parse a comment block and merge the information found in the
# parameter descriptions; finally return a block that is as complete
# as possible
#
def mergeFunctionComment(self, name, description, quiet = 0):
global ignored_functions
if name == 'main':
quiet = 1
if name[0:2] == '__':
quiet = 1
if ignored_functions.has_key(name):
quiet = 1
(ret, args) = description
desc = ""
retdesc = ""
if self.comment == None:
if not quiet:
self.warning("Missing comment for function %s" % (name))
return(((ret[0], retdesc), args, desc))
if self.comment[0] != '*':
if not quiet:
self.warning("Missing * in function comment for %s" % (name))
return(((ret[0], retdesc), args, desc))
lines = string.split(self.comment, '\n')
if lines[0] == '*':
del lines[0]
if lines[0] != "* %s:" % (name):
if not quiet:
self.warning("Misformatted function comment for %s" % (name))
self.warning(" Expecting '* %s:' got '%s'" % (name, lines[0]))
return(((ret[0], retdesc), args, desc))
del lines[0]
while lines[0] == '*':
del lines[0]
nbargs = len(args)
while len(lines) > 0 and lines[0][0:3] == '* @':
l = lines[0][3:]
try:
(arg, desc) = string.split(l, ':', 1)
desc=string.strip(desc)
arg=string.strip(arg)
except:
if not quiet:
self.warning("Misformatted function comment for %s" % (name))
self.warning(" problem with '%s'" % (lines[0]))
del lines[0]
continue
del lines[0]
l = string.strip(lines[0])
while len(l) > 2 and l[0:3] != '* @':
while l[0] == '*':
l = l[1:]
desc = desc + ' ' + string.strip(l)
del lines[0]
if len(lines) == 0:
break
l = lines[0]
i = 0
while i < nbargs:
if args[i][1] == arg:
args[i] = (args[i][0], arg, desc)
break;
i = i + 1
if i >= nbargs:
if not quiet:
self.warning("Unable to find arg %s from function comment for %s" % (
arg, name))
while len(lines) > 0 and lines[0] == '*':
del lines[0]
desc = None
while len(lines) > 0:
l = lines[0]
i = 0
# Remove all leading '*', followed by at most one ' ' character
# since we need to preserve correct indentation of code examples
while i < len(l) and l[i] == '*':
i = i + 1
if i > 0:
if i < len(l) and l[i] == ' ':
i = i + 1
l = l[i:]
if len(l) >= 6 and (l[0:7] == "returns" or l[0:7] == "Returns"):
try:
l = string.split(l, ' ', 1)[1]
except:
l = ""
retdesc = string.strip(l)
del lines[0]
while len(lines) > 0:
l = lines[0]
while len(l) > 0 and l[0] == '*':
l = l[1:]
l = string.strip(l)
retdesc = retdesc + " " + l
del lines[0]
else:
if desc is not None:
desc = desc + "\n" + l
else:
desc = l
del lines[0]
if desc is None:
desc = ""
retdesc = string.strip(retdesc)
desc = string.strip(desc)
if quiet == 0:
#
# report missing comments
#
i = 0
while i < nbargs:
if args[i][2] == None and args[i][0] != "void" and args[i][1] != None:
self.warning("Function comment for %s lacks description of arg %s" % (name, args[i][1]))
i = i + 1
if retdesc == "" and ret[0] != "void":
self.warning("Function comment for %s lacks description of return value" % (name))
if desc == "":
self.warning("Function comment for %s lacks description of the function" % (name))
return(((ret[0], retdesc), args, desc))
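# Illustrative note, not part of the builder: the comment layout that
# mergeFunctionComment() expects, reconstructed from the parsing rules above.
# The symbol and argument names are placeholders.
#
#     /**
#      * virExampleLookup:
#      * @conn: pointer to the connection
#      * @name: name to look up
#      *
#      * Longer description of what the function does.
#      *
#      * Returns the object on success, NULL in case of error.
#      */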
def parsePreproc(self, token):
if debug:
print "=> preproc ", token, self.lexer.tokens
name = token[1]
if name == "#include":
token = self.lexer.token()
if token == None:
return None
if token[0] == 'preproc':
self.index_add(token[1], self.filename, not self.is_header,
"include")
return self.lexer.token()
return token
if name == "#define":
token = self.lexer.token()
if token == None:
return None
if token[0] == 'preproc':
# TODO macros with arguments
name = token[1]
lst = []
token = self.lexer.token()
while token != None and token[0] == 'preproc' and \
token[1][0] != '#':
lst.append(token[1])
token = self.lexer.token()
try:
name = string.split(name, '(') [0]
except:
pass
info = self.parseMacroComment(name, not self.is_header)
self.index_add(name, self.filename, not self.is_header,
"macro", info)
return token
#
# Processing of conditionals modified by Bill 1/1/05
#
# We process conditionals (i.e. tokens from #ifdef, #ifndef,
# #if, #else and #endif) for headers and mainline code,
# store the ones from the header in libxml2-api.xml, and later
# (in the routine merge_public) verify that the two (header and
# mainline code) agree.
#
# There is a small problem with processing the headers. Some of
# the variables are not concerned with enabling / disabling of
# library functions (e.g. '__XML_PARSER_H__'), and we don't want
# them to be included in libxml2-api.xml, or involved in
# the check between the header and the mainline code. To
# accomplish this, we ignore any conditional which doesn't include
# the string 'ENABLED'
#
if name == "#ifdef":
apstr = self.lexer.tokens[0][1]
try:
self.defines.append(apstr)
if string.find(apstr, 'ENABLED') != -1:
self.conditionals.append("defined(%s)" % apstr)
except:
pass
elif name == "#ifndef":
apstr = self.lexer.tokens[0][1]
try:
self.defines.append(apstr)
if string.find(apstr, 'ENABLED') != -1:
self.conditionals.append("!defined(%s)" % apstr)
except:
pass
elif name == "#if":
apstr = ""
for tok in self.lexer.tokens:
if apstr != "":
apstr = apstr + " "
apstr = apstr + tok[1]
try:
self.defines.append(apstr)
if string.find(apstr, 'ENABLED') != -1:
self.conditionals.append(apstr)
except:
pass
elif name == "#else":
if self.conditionals != [] and \
string.find(self.defines[-1], 'ENABLED') != -1:
self.conditionals[-1] = "!(%s)" % self.conditionals[-1]
elif name == "#endif":
if self.conditionals != [] and \
string.find(self.defines[-1], 'ENABLED') != -1:
self.conditionals = self.conditionals[:-1]
self.defines = self.defines[:-1]
token = self.lexer.token()
while token != None and token[0] == 'preproc' and \
token[1][0] != '#':
token = self.lexer.token()
return token
#
# token acquisition on top of the lexer; it internally handles
# preprocessor directives and comments since they are logically not
# part of the program structure.
#
def push(self, tok):
self.lexer.push(tok)
def token(self):
global ignored_words
token = self.lexer.token()
while token != None:
if token[0] == 'comment':
token = self.parseComment(token)
continue
elif token[0] == 'preproc':
token = self.parsePreproc(token)
continue
elif token[0] == "name" and token[1] == "__const":
token = ("name", "const")
return token
elif token[0] == "name" and token[1] == "__attribute":
token = self.lexer.token()
while token != None and token[1] != ";":
token = self.lexer.token()
return token
elif token[0] == "name" and ignored_words.has_key(token[1]):
(n, info) = ignored_words[token[1]]
i = 0
while i < n:
token = self.lexer.token()
i = i + 1
token = self.lexer.token()
continue
else:
if debug:
print "=> ", token
return token
return None
#
# Parse a typedef, it records the type and its name.
#
def parseTypedef(self, token):
if token == None:
return None
token = self.parseType(token)
if token == None:
self.error("parsing typedef")
return None
base_type = self.type
type = base_type
#self.debug("end typedef type", token)
while token != None:
if token[0] == "name":
name = token[1]
signature = self.signature
if signature != None:
type = string.split(type, '(')[0]
d = self.mergeFunctionComment(name,
((type, None), signature), 1)
self.index_add(name, self.filename, not self.is_header,
"functype", d)
else:
if base_type == "struct":
self.index_add(name, self.filename, not self.is_header,
"struct", type)
base_type = "struct " + name
else:
# TODO report missing or misformatted comments
info = self.parseTypeComment(name, 1)
self.index_add(name, self.filename, not self.is_header,
"typedef", type, info)
token = self.token()
else:
self.error("parsing typedef: expecting a name")
return token
#self.debug("end typedef", token)
if token != None and token[0] == 'sep' and token[1] == ',':
type = base_type
token = self.token()
while token != None and token[0] == "op":
type = type + token[1]
token = self.token()
elif token != None and token[0] == 'sep' and token[1] == ';':
break;
elif token != None and token[0] == 'name':
type = base_type
continue;
else:
self.error("parsing typedef: expecting ';'", token)
return token
token = self.token()
return token
#
# Parse a C code block; used for functions, it parses up to and
# including the balancing }
#
def parseBlock(self, token):
while token != None:
if token[0] == "sep" and token[1] == "{":
token = self.token()
token = self.parseBlock(token)
elif token[0] == "sep" and token[1] == "}":
self.comment = None
token = self.token()
return token
else:
if self.collect_ref == 1:
oldtok = token
token = self.token()
if oldtok[0] == "name" and oldtok[1][0:3] == "vir":
if token[0] == "sep" and token[1] == "(":
self.index_add_ref(oldtok[1], self.filename,
0, "function")
token = self.token()
elif token[0] == "name":
token = self.token()
if token[0] == "sep" and (token[1] == ";" or
token[1] == "," or token[1] == "="):
self.index_add_ref(oldtok[1], self.filename,
0, "type")
elif oldtok[0] == "name" and oldtok[1][0:4] == "XEN_":
self.index_add_ref(oldtok[1], self.filename,
0, "typedef")
elif oldtok[0] == "name" and oldtok[1][0:7] == "LIBXEN_":
self.index_add_ref(oldtok[1], self.filename,
0, "typedef")
else:
token = self.token()
return token
#
# Parse a C struct definition till the balancing }
#
def parseStruct(self, token):
fields = []
#self.debug("start parseStruct", token)
while token != None:
if token[0] == "sep" and token[1] == "{":
token = self.token()
token = self.parseTypeBlock(token)
elif token[0] == "sep" and token[1] == "}":
self.struct_fields = fields
#self.debug("end parseStruct", token)
#print fields
token = self.token()
return token
else:
base_type = self.type
#self.debug("before parseType", token)
token = self.parseType(token)
#self.debug("after parseType", token)
if token != None and token[0] == "name":
fname = token[1]
token = self.token()
if token[0] == "sep" and token[1] == ";":
self.comment = None
token = self.token()
self.cleanupComment()
if self.type == "union":
fields.append((self.type, fname, self.comment,
self.union_fields))
self.union_fields = []
else:
fields.append((self.type, fname, self.comment))
self.comment = None
else:
self.error("parseStruct: expecting ;", token)
elif token != None and token[0] == "sep" and token[1] == "{":
token = self.token()
token = self.parseTypeBlock(token)
if token != None and token[0] == "name":
token = self.token()
if token != None and token[0] == "sep" and token[1] == ";":
token = self.token()
else:
self.error("parseStruct: expecting ;", token)
else:
self.error("parseStruct: name", token)
token = self.token()
self.type = base_type;
self.struct_fields = fields
#self.debug("end parseStruct", token)
#print fields
return token
#
# Parse a C union definition till the balancing }
#
def parseUnion(self, token):
fields = []
# self.debug("start parseUnion", token)
while token != None:
if token[0] == "sep" and token[1] == "{":
token = self.token()
token = self.parseTypeBlock(token)
elif token[0] == "sep" and token[1] == "}":
self.union_fields = fields
# self.debug("end parseUnion", token)
# print fields
token = self.token()
return token
else:
base_type = self.type
# self.debug("before parseType", token)
token = self.parseType(token)
# self.debug("after parseType", token)
if token != None and token[0] == "name":
fname = token[1]
token = self.token()
if token[0] == "sep" and token[1] == ";":
self.comment = None
token = self.token()
self.cleanupComment()
fields.append((self.type, fname, self.comment))
self.comment = None
else:
self.error("parseUnion: expecting ;", token)
elif token != None and token[0] == "sep" and token[1] == "{":
token = self.token()
token = self.parseTypeBlock(token)
if token != None and token[0] == "name":
token = self.token()
if token != None and token[0] == "sep" and token[1] == ";":
token = self.token()
else:
self.error("parseUnion: expecting ;", token)
else:
self.error("parseUnion: name", token)
token = self.token()
self.type = base_type;
self.union_fields = fields
# self.debug("end parseUnion", token)
# print fields
return token
#
# Parse a C enum block, parse till the balancing }
#
def parseEnumBlock(self, token):
self.enums = []
name = None
self.comment = None
comment = ""
value = "0"
while token != None:
if token[0] == "sep" and token[1] == "{":
token = self.token()
token = self.parseTypeBlock(token)
elif token[0] == "sep" and token[1] == "}":
if name != None:
self.cleanupComment()
if self.comment != None:
comment = self.comment
self.comment = None
self.enums.append((name, value, comment))
token = self.token()
return token
elif token[0] == "name":
self.cleanupComment()
if name != None:
if self.comment != None:
comment = string.strip(self.comment)
self.comment = None
self.enums.append((name, value, comment))
name = token[1]
comment = ""
token = self.token()
if token[0] == "op" and token[1][0] == "=":
value = ""
if len(token[1]) > 1:
value = token[1][1:]
token = self.token()
while token[0] != "sep" or (token[1] != ',' and
token[1] != '}'):
value = value + token[1]
token = self.token()
else:
try:
value = "%d" % (int(value) + 1)
except:
self.warning("Failed to compute value of enum %s" % (name))
value=""
if token[0] == "sep" and token[1] == ",":
token = self.token()
else:
token = self.token()
return token
def parseVirEnumDecl(self, token):
if token[0] != "name":
self.error("parsing VIR_ENUM_DECL: expecting name", token)
token = self.token()
if token[0] != "sep":
self.error("parsing VIR_ENUM_DECL: expecting ')'", token)
if token[1] != ')':
self.error("parsing VIR_ENUM_DECL: expecting ')'", token)
token = self.token()
if token[0] == "sep" and token[1] == ';':
token = self.token()
return token
def parseVirEnumImpl(self, token):
# First the type name
if token[0] != "name":
self.error("parsing VIR_ENUM_IMPL: expecting name", token)
token = self.token()
if token[0] != "sep":
self.error("parsing VIR_ENUM_IMPL: expecting ','", token)
if token[1] != ',':
self.error("parsing VIR_ENUM_IMPL: expecting ','", token)
token = self.token()
# Now the sentinel name
if token[0] != "name":
self.error("parsing VIR_ENUM_IMPL: expecting name", token)
token = self.token()
if token[0] != "sep":
self.error("parsing VIR_ENUM_IMPL: expecting ','", token)
if token[1] != ',':
self.error("parsing VIR_ENUM_IMPL: expecting ','", token)
token = self.token()
# Now a list of strings (optional comments)
while token is not None:
isGettext = False
# First a string, optionally with N_(...)
if token[0] == 'name':
if token[1] != 'N_':
self.error("parsing VIR_ENUM_IMPL: expecting 'N_'", token)
token = self.token()
if token[0] != "sep" or token[1] != '(':
self.error("parsing VIR_ENUM_IMPL: expecting '('", token)
token = self.token()
isGettext = True
if token[0] != "string":
self.error("parsing VIR_ENUM_IMPL: expecting a string", token)
token = self.token()
elif token[0] == "string":
token = self.token()
else:
self.error("parsing VIR_ENUM_IMPL: expecting a string", token)
# Then a separator
if token[0] == "sep":
if isGettext and token[1] == ')':
token = self.token()
if token[1] == ',':
token = self.token()
if token[1] == ')':
token = self.token()
break
# Then an optional comment
if token[0] == "comment":
token = self.token()
if token[0] == "sep" and token[1] == ';':
token = self.token()
return token
#
# Parse a C definition block; used for structs or unions, it parses
# up to the balancing }
#
def parseTypeBlock(self, token):
while token != None:
if token[0] == "sep" and token[1] == "{":
token = self.token()
token = self.parseTypeBlock(token)
elif token[0] == "sep" and token[1] == "}":
token = self.token()
return token
else:
token = self.token()
return token
#
# Parse a type: the fact that the type name can either occur after
# the definition or within the definition makes it a little harder;
# if it is inside, the name token is pushed back before returning
#
def parseType(self, token):
self.type = ""
self.struct_fields = []
self.union_fields = []
self.signature = None
if token == None:
return token
while token[0] == "name" and (
token[1] == "const" or \
token[1] == "unsigned" or \
token[1] == "signed"):
if self.type == "":
self.type = token[1]
else:
self.type = self.type + " " + token[1]
token = self.token()
if token[0] == "name" and token[1] == "long":
if self.type == "":
self.type = token[1]
else:
self.type = self.type + " " + token[1]
# some read ahead for long long
oldtmp = token
token = self.token()
if token[0] == "name" and token[1] == "long":
self.type = self.type + " " + token[1]
else:
self.push(token)
token = oldtmp
oldtmp = token
token = self.token()
if token[0] == "name" and token[1] == "int":
self.type = self.type + " " + token[1]
else:
self.push(token)
token = oldtmp
elif token[0] == "name" and token[1] == "short":
if self.type == "":
self.type = token[1]
else:
self.type = self.type + " " + token[1]
elif token[0] == "name" and token[1] == "struct":
if self.type == "":
self.type = token[1]
else:
self.type = self.type + " " + token[1]
token = self.token()
nametok = None
if token[0] == "name":
nametok = token
token = self.token()
if token != None and token[0] == "sep" and token[1] == "{":
token = self.token()
token = self.parseStruct(token)
elif token != None and token[0] == "op" and token[1] == "*":
self.type = self.type + " " + nametok[1] + " *"
token = self.token()
while token != None and token[0] == "op" and token[1] == "*":
self.type = self.type + " *"
token = self.token()
if token[0] == "name":
nametok = token
token = self.token()
else:
self.error("struct : expecting name", token)
return token
elif token != None and token[0] == "name" and nametok != None:
self.type = self.type + " " + nametok[1]
return token
if nametok != None:
self.lexer.push(token)
token = nametok
return token
elif token[0] == "name" and token[1] == "union":
if self.type == "":
self.type = token[1]
else:
self.type = self.type + " " + token[1]
token = self.token()
nametok = None
if token[0] == "name":
nametok = token
token = self.token()
if token != None and token[0] == "sep" and token[1] == "{":
token = self.token()
token = self.parseUnion(token)
elif token != None and token[0] == "name" and nametok != None:
self.type = self.type + " " + nametok[1]
return token
if nametok != None:
self.lexer.push(token)
token = nametok
return token
elif token[0] == "name" and token[1] == "enum":
if self.type == "":
self.type = token[1]
else:
self.type = self.type + " " + token[1]
self.enums = []
token = self.token()
if token != None and token[0] == "sep" and token[1] == "{":
token = self.token()
token = self.parseEnumBlock(token)
else:
self.error("parsing enum: expecting '{'", token)
enum_type = None
if token != None and token[0] != "name":
self.lexer.push(token)
token = ("name", "enum")
else:
enum_type = token[1]
for enum in self.enums:
self.index_add(enum[0], self.filename,
not self.is_header, "enum",
(enum[1], enum[2], enum_type))
return token
elif token[0] == "name" and token[1] == "VIR_ENUM_DECL":
token = self.token()
if token != None and token[0] == "sep" and token[1] == "(":
token = self.token()
token = self.parseVirEnumDecl(token)
else:
self.error("parsing VIR_ENUM_DECL: expecting '('", token)
if token != None:
self.lexer.push(token)
token = ("name", "virenumdecl")
return token
elif token[0] == "name" and token[1] == "VIR_ENUM_IMPL":
token = self.token()
if token != None and token[0] == "sep" and token[1] == "(":
token = self.token()
token = self.parseVirEnumImpl(token)
else:
self.error("parsing VIR_ENUM_IMPL: expecting '('", token)
if token != None:
self.lexer.push(token)
token = ("name", "virenumimpl")
return token
elif token[0] == "name":
if self.type == "":
self.type = token[1]
else:
self.type = self.type + " " + token[1]
else:
self.error("parsing type %s: expecting a name" % (self.type),
token)
return token
token = self.token()
while token != None and (token[0] == "op" or
token[0] == "name" and token[1] == "const"):
self.type = self.type + " " + token[1]
token = self.token()
#
# if there is a parenthesis here, this means a function type
#
if token != None and token[0] == "sep" and token[1] == '(':
self.type = self.type + token[1]
token = self.token()
while token != None and token[0] == "op" and token[1] == '*':
self.type = self.type + token[1]
token = self.token()
if token == None or token[0] != "name" :
self.error("parsing function type, name expected", token);
return token
self.type = self.type + token[1]
nametok = token
token = self.token()
if token != None and token[0] == "sep" and token[1] == ')':
self.type = self.type + token[1]
token = self.token()
if token != None and token[0] == "sep" and token[1] == '(':
token = self.token()
type = self.type;
token = self.parseSignature(token);
self.type = type;
else:
self.error("parsing function type, '(' expected", token);
return token
else:
self.error("parsing function type, ')' expected", token);
return token
self.lexer.push(token)
token = nametok
return token
#
# do some lookahead for arrays
#
if token != None and token[0] == "name":
nametok = token
token = self.token()
if token != None and token[0] == "sep" and token[1] == '[':
self.type = self.type + " " + nametok[1]
while token != None and token[0] == "sep" and token[1] == '[':
self.type = self.type + token[1]
token = self.token()
while token != None and token[0] != 'sep' and \
token[1] != ']' and token[1] != ';':
self.type = self.type + token[1]
token = self.token()
if token != None and token[0] == 'sep' and token[1] == ']':
self.type = self.type + token[1]
token = self.token()
else:
self.error("parsing array type, ']' expected", token);
return token
elif token != None and token[0] == "sep" and token[1] == ':':
# remove :12 in case it's a limited int size
token = self.token()
token = self.token()
self.lexer.push(token)
token = nametok
return token
#
# Parse a signature: '(' has been parsed and we scan the type definition
# up to and including the ')'
def parseSignature(self, token):
signature = []
if token != None and token[0] == "sep" and token[1] == ')':
self.signature = []
token = self.token()
return token
while token != None:
token = self.parseType(token)
if token != None and token[0] == "name":
signature.append((self.type, token[1], None))
token = self.token()
elif token != None and token[0] == "sep" and token[1] == ',':
token = self.token()
continue
elif token != None and token[0] == "sep" and token[1] == ')':
# only the type was provided
if self.type == "...":
signature.append((self.type, "...", None))
else:
signature.append((self.type, None, None))
if token != None and token[0] == "sep":
if token[1] == ',':
token = self.token()
continue
elif token[1] == ')':
token = self.token()
break
self.signature = signature
return token
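# For illustration (hypothetical declaration): parsing the argument list of
# "int virFoo(virConnectPtr conn, unsigned int flags)" would leave
# self.signature as a list of (type, name, info) tuples, roughly
#   [('virConnectPtr', 'conn', None), ('unsigned int', 'flags', None)]
# while an empty "()" argument list yields an empty self.signature.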
# this dict contains the functions that are allowed to use [unsigned]
# long for legacy reasons in their signature and return type. this list is
# fixed. new procedures and public APIs have to use [unsigned] long long
long_legacy_functions = \
{ "virGetVersion" : (False, ("libVer", "typeVer")),
"virConnectGetLibVersion" : (False, ("libVer")),
"virConnectGetVersion" : (False, ("hvVer")),
"virDomainGetMaxMemory" : (True, ()),
"virDomainMigrate" : (False, ("flags", "bandwidth")),
"virDomainMigrate2" : (False, ("flags", "bandwidth")),
"virDomainMigrateBegin3" : (False, ("flags", "bandwidth")),
"virDomainMigrateConfirm3" : (False, ("flags", "bandwidth")),
"virDomainMigrateDirect" : (False, ("flags", "bandwidth")),
"virDomainMigrateFinish" : (False, ("flags")),
"virDomainMigrateFinish2" : (False, ("flags")),
"virDomainMigrateFinish3" : (False, ("flags")),
"virDomainMigratePeer2Peer" : (False, ("flags", "bandwidth")),
"virDomainMigratePerform" : (False, ("flags", "bandwidth")),
"virDomainMigratePerform3" : (False, ("flags", "bandwidth")),
"virDomainMigratePrepare" : (False, ("flags", "bandwidth")),
"virDomainMigratePrepare2" : (False, ("flags", "bandwidth")),
"virDomainMigratePrepare3" : (False, ("flags", "bandwidth")),
"virDomainMigratePrepareTunnel" : (False, ("flags", "bandwidth")),
"virDomainMigratePrepareTunnel3" : (False, ("flags", "bandwidth")),
"virDomainMigrateToURI" : (False, ("flags", "bandwidth")),
"virDomainMigrateToURI2" : (False, ("flags", "bandwidth")),
"virDomainMigrateVersion1" : (False, ("flags", "bandwidth")),
"virDomainMigrateVersion2" : (False, ("flags", "bandwidth")),
"virDomainMigrateVersion3" : (False, ("flags", "bandwidth")),
"virDomainMigrateSetMaxSpeed" : (False, ("bandwidth")),
"virDomainSetMaxMemory" : (False, ("memory")),
"virDomainSetMemory" : (False, ("memory")),
"virDomainSetMemoryFlags" : (False, ("memory")),
"virDomainBlockCommit" : (False, ("bandwidth")),
"virDomainBlockJobSetSpeed" : (False, ("bandwidth")),
"virDomainBlockPull" : (False, ("bandwidth")),
"virDomainBlockRebase" : (False, ("bandwidth")),
"virDomainMigrateGetMaxSpeed" : (False, ("bandwidth")) }
def checkLongLegacyFunction(self, name, return_type, signature):
if "long" in return_type and "long long" not in return_type:
try:
if not CParser.long_legacy_functions[name][0]:
raise Exception()
except:
self.error(("function '%s' is not allowed to return long, "
"use long long instead") % (name))
for param in signature:
if "long" in param[0] and "long long" not in param[0]:
try:
if param[1] not in CParser.long_legacy_functions[name][1]:
raise Exception()
except:
self.error(("function '%s' is not allowed to take long "
"parameter '%s', use long long instead")
% (name, param[1]))
# this dict contains the structs that are allowed to use [unsigned]
# long for legacy reasons. this list is fixed. new structs have to use
# [unsigned] long long
long_legacy_struct_fields = \
{ "_virDomainInfo" : ("maxMem", "memory"),
"_virNodeInfo" : ("memory"),
"_virDomainBlockJobInfo" : ("bandwidth") }
def checkLongLegacyStruct(self, name, fields):
for field in fields:
if "long" in field[0] and "long long" not in field[0]:
try:
if field[1] not in CParser.long_legacy_struct_fields[name]:
raise Exception()
except:
self.error(("struct '%s' is not allowed to contain long "
"field '%s', use long long instead") \
% (name, field[1]))
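# How the two whitelists above are meant to be read, on hypothetical calls:
#   checkLongLegacyFunction("virDomainGetMaxMemory", "unsigned long", [])
#     passes, since the entry (True, ()) permits a legacy 'long' return type.
#   checkLongLegacyFunction("virNewApi", "long", [])
#     reports an error: the name is not whitelisted, so it must use
#     'long long' instead.
#   checkLongLegacyStruct("_virNodeInfo", [("unsigned long", "memory", None)])
#     passes, because the "memory" field is whitelisted for _virNodeInfo.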
#
# Parse a global definition, be it a type, variable or function
# the extern "C" blocks are a bit nasty and require it to recurse.
#
def parseGlobal(self, token):
static = 0
if token[1] == 'extern':
token = self.token()
if token == None:
return token
if token[0] == 'string':
if token[1] == 'C':
token = self.token()
if token == None:
return token
if token[0] == 'sep' and token[1] == "{":
token = self.token()
# print 'Entering extern "C" line ', self.lineno()
while token != None and (token[0] != 'sep' or
token[1] != "}"):
if token[0] == 'name':
token = self.parseGlobal(token)
else:
self.error(
"token %s %s unexpected at the top level" % (
token[0], token[1]))
token = self.parseGlobal(token)
# print 'Exiting extern "C" line', self.lineno()
token = self.token()
return token
else:
return token
elif token[1] == 'static':
static = 1
token = self.token()
if token == None or token[0] != 'name':
return token
if token[1] == 'typedef':
token = self.token()
return self.parseTypedef(token)
else:
token = self.parseType(token)
type_orig = self.type
if token == None or token[0] != "name":
return token
type = type_orig
self.name = token[1]
token = self.token()
while token != None and (token[0] == "sep" or token[0] == "op"):
if token[0] == "sep":
if token[1] == "[":
type = type + token[1]
token = self.token()
while token != None and (token[0] != "sep" or \
token[1] != ";"):
type = type + token[1]
token = self.token()
if token != None and token[0] == "op" and token[1] == "=":
#
# Skip the initialization of the variable
#
token = self.token()
if token[0] == 'sep' and token[1] == '{':
token = self.token()
token = self.parseBlock(token)
else:
self.comment = None
while token != None and (token[0] != "sep" or \
(token[1] != ';' and token[1] != ',')):
token = self.token()
self.comment = None
if token == None or token[0] != "sep" or (token[1] != ';' and
token[1] != ','):
self.error("missing ';' or ',' after value")
if token != None and token[0] == "sep":
if token[1] == ";":
self.comment = None
token = self.token()
if type == "struct":
self.checkLongLegacyStruct(self.name, self.struct_fields)
self.index_add(self.name, self.filename,
not self.is_header, "struct", self.struct_fields)
else:
self.index_add(self.name, self.filename,
not self.is_header, "variable", type)
break
elif token[1] == "(":
token = self.token()
token = self.parseSignature(token)
if token == None:
return None
if token[0] == "sep" and token[1] == ";":
self.checkLongLegacyFunction(self.name, type, self.signature)
d = self.mergeFunctionComment(self.name,
((type, None), self.signature), 1)
self.index_add(self.name, self.filename, static,
"function", d)
token = self.token()
elif token[0] == "sep" and token[1] == "{":
self.checkLongLegacyFunction(self.name, type, self.signature)
d = self.mergeFunctionComment(self.name,
((type, None), self.signature), static)
self.index_add(self.name, self.filename, static,
"function", d)
token = self.token()
token = self.parseBlock(token);
elif token[1] == ',':
self.comment = None
self.index_add(self.name, self.filename, static,
"variable", type)
type = type_orig
token = self.token()
while token != None and token[0] == "sep":
type = type + token[1]
token = self.token()
if token != None and token[0] == "name":
self.name = token[1]
token = self.token()
else:
break
return token
def parse(self):
if not quiet:
print "Parsing %s" % (self.filename)
token = self.token()
while token != None:
if token[0] == 'name':
token = self.parseGlobal(token)
else:
self.error("token %s %s unexpected at the top level" % (
token[0], token[1]))
token = self.parseGlobal(token)
return
self.parseTopComment(self.top_comment)
return self.index
class docBuilder:
"""A documentation builder"""
def __init__(self, name, path='.', directories=['.'], includes=[]):
self.name = name
self.path = path
self.directories = directories
if name == "libvirt":
self.includes = includes + included_files.keys()
elif name == "libvirt-qemu":
self.includes = includes + qemu_included_files.keys()
elif name == "libvirt-lxc":
self.includes = includes + lxc_included_files.keys()
self.modules = {}
self.headers = {}
self.idx = index()
self.xref = {}
self.index = {}
self.basename = name
def warning(self, msg):
global warnings
warnings = warnings + 1
print msg
def indexString(self, id, str):
if str == None:
return
str = string.replace(str, "'", ' ')
str = string.replace(str, '"', ' ')
str = string.replace(str, "/", ' ')
str = string.replace(str, '*', ' ')
str = string.replace(str, "[", ' ')
str = string.replace(str, "]", ' ')
str = string.replace(str, "(", ' ')
str = string.replace(str, ")", ' ')
str = string.replace(str, "<", ' ')
str = string.replace(str, '>', ' ')
str = string.replace(str, "&", ' ')
str = string.replace(str, '#', ' ')
str = string.replace(str, ",", ' ')
str = string.replace(str, '.', ' ')
str = string.replace(str, ';', ' ')
tokens = string.split(str)
for token in tokens:
try:
c = token[0]
if string.find(string.letters, c) < 0:
pass
elif len(token) < 3:
pass
else:
lower = string.lower(token)
# TODO: generalize this a bit
if lower == 'and' or lower == 'the':
pass
elif self.xref.has_key(token):
self.xref[token].append(id)
else:
self.xref[token] = [id]
except:
pass
def analyze(self):
if not quiet:
print "Project %s : %d headers, %d modules" % (self.name, len(self.headers.keys()), len(self.modules.keys()))
self.idx.analyze()
def scanHeaders(self):
for header in self.headers.keys():
parser = CParser(header)
idx = parser.parse()
self.headers[header] = idx;
self.idx.merge(idx)
def scanModules(self):
for module in self.modules.keys():
parser = CParser(module)
idx = parser.parse()
# idx.analyze()
self.modules[module] = idx
self.idx.merge_public(idx)
def scan(self):
for directory in self.directories:
files = glob.glob(directory + "/*.c")
for file in files:
skip = 1
for incl in self.includes:
if string.find(file, incl) != -1:
skip = 0;
break
if skip == 0:
self.modules[file] = None;
files = glob.glob(directory + "/*.h")
for file in files:
skip = 1
for incl in self.includes:
if string.find(file, incl) != -1:
skip = 0;
break
if skip == 0:
self.headers[file] = None;
self.scanHeaders()
self.scanModules()
def modulename_file(self, file):
module = os.path.basename(file)
if module[-2:] == '.h':
module = module[:-2]
elif module[-2:] == '.c':
module = module[:-2]
return module
def serialize_enum(self, output, name):
id = self.idx.enums[name]
output.write(" <enum name='%s' file='%s'" % (name,
self.modulename_file(id.header)))
if id.info != None:
info = id.info
if info[0] != None and info[0] != '':
try:
val = eval(info[0])
except:
val = info[0]
output.write(" value='%s'" % (val));
if info[2] != None and info[2] != '':
output.write(" type='%s'" % info[2]);
if info[1] != None and info[1] != '':
output.write(" info='%s'" % escape(info[1]));
output.write("/>\n")
def serialize_macro(self, output, name):
id = self.idx.macros[name]
output.write(" <macro name='%s' file='%s'>\n" % (name,
self.modulename_file(id.header)))
if id.info != None:
try:
(args, desc) = id.info
if desc != None and desc != "":
output.write(" <info><![CDATA[%s]]></info>\n" % (desc))
self.indexString(name, desc)
for arg in args:
(name, desc) = arg
if desc != None and desc != "":
output.write(" <arg name='%s' info='%s'/>\n" % (
name, escape(desc)))
self.indexString(name, desc)
else:
output.write(" <arg name='%s'/>\n" % (name))
except:
pass
output.write(" </macro>\n")
def serialize_union(self, output, field, desc):
output.write(" <field name='%s' type='union' info='%s'>\n" % (field[1] , desc))
output.write(" <union>\n")
for f in field[3]:
desc = f[2]
if desc == None:
desc = ''
else:
desc = escape(desc)
output.write(" <field name='%s' type='%s' info='%s'/>\n" % (f[1] , f[0], desc))
output.write(" </union>\n")
output.write(" </field>\n")
def serialize_typedef(self, output, name):
id = self.idx.typedefs[name]
if id.info[0:7] == 'struct ':
output.write(" <struct name='%s' file='%s' type='%s'" % (
name, self.modulename_file(id.header), id.info))
name = id.info[7:]
if self.idx.structs.has_key(name) and ( \
type(self.idx.structs[name].info) == type(()) or
type(self.idx.structs[name].info) == type([])):
output.write(">\n");
try:
for field in self.idx.structs[name].info:
desc = field[2]
self.indexString(name, desc)
if desc == None:
desc = ''
else:
desc = escape(desc)
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Spatial pooler implementation.
TODO: Change print statements to use the logging module.
"""
import copy
import cPickle
import inspect
import itertools
import random
import sys
import time
import numpy
import numpy.random
from nupic.bindings.algorithms import (adjustMasterValidPermanence, cpp_overlap,
Inhibition2)
from nupic.bindings.math import (count_gte, GetNTAReal, Random as NupicRandom,
SM_01_32_32, SM32)
from nupic.math.cross import cross
from nupic.research import fdrutilities as fdru
realDType = GetNTAReal()
gPylabInitialized = False
# kDutyCycleFactor adds dutyCycleAfterInh to the overlap in the inhibition
# step, to act as a tie breaker
kDutyCycleFactor = 0.01
def _extractCallingMethodArgs():
"""
Returns args dictionary from the calling method
"""
callingFrame = inspect.stack()[1][0]
argNames, _, _, frameLocalVarDict = inspect.getargvalues(callingFrame)
argNames.remove("self")
args = copy.copy(frameLocalVarDict)
for varName in frameLocalVarDict:
if varName not in argNames:
args.pop(varName)
return args
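# Usage sketch (hypothetical values): when invoked from a constructor such as
# FDRCSpatial2.__init__ below,
#   args = _extractCallingMethodArgs()
# returns only the caller's named arguments, e.g.
#   {'inputShape': (32, 32), 'inputBorder': 8, 'seed': -1, ...}
# Locals that are not in the caller's signature are dropped by the argNames
# filter above.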
class FDRCSpatial2(object):
"""
Class for spatial pooling based on fixed random distributed
representation (FDR).
This version of FDRCSpatial includes adaptive receptive fields, no-dupe rules
and gradual boosting. It supports 1-D and 2-D topologies with cloning.
"""
def __init__(self,
inputShape=(32, 32),
inputBorder=8,
inputDensity=1.0,
coincidencesShape=(48, 48),
coincInputRadius=16,
coincInputPoolPct=1.0,
gaussianDist=False,
commonDistributions=False,
localAreaDensity=-1.0,
numActivePerInhArea=10.0,
stimulusThreshold=0,
synPermInactiveDec=0.01,
synPermActiveInc=0.1,
synPermActiveSharedDec=0.0,
synPermOrphanDec=0.0,
synPermConnected=0.10,
minPctDutyCycleBeforeInh=0.001,
minPctDutyCycleAfterInh=0.001,
dutyCyclePeriod=1000,
maxFiringBoost=10.0,
maxSSFiringBoost=2.0,
maxSynPermBoost=10.0,
minDistance=0.0,
cloneMap=None,
numCloneMasters=-1,
seed=-1,
spVerbosity=0,
printPeriodicStats=0,
testMode=False,
globalInhibition=False,
spReconstructionParam="unweighted_mean",
useHighTier=True,
randomSP=False,
):
"""
Parameters:
----------------------------
inputShape: The dimensions of the input vector. Format is (height,
width) e.g. (24, 72). If the input is from a sensor,
it is interpreted as having a 2-D topology of 24
pixels high and 72 wide.
inputBorder: The first column from an edge will be centered
over an input which is 'inputBorder' inputs from the
edge.
inputDensity: The density of the input. This is only to aid in
figuring out the initial number of connected synapses
to place on each column. The lower the inputDensity,
the more initial connections will be assigned to each
column.
coincidencesShape: The dimensions of column layout. Format is (height,
width) e.g. (80,100) means a total of 80*100 = 8000 columns are
arranged in a 2-D topology with 80 rows and
100 columns.
coincInputRadius: This defines the max radius of the receptive field of
each column. This is used to limit memory requirements
and processing time. It could be set large enough to
encompass the entire input field and the SP would
still work fine, but require more memory and
processing time.
This parameter defines a square area: a column will
have a max square RF with sides of length
2 * coincInputRadius + 1.
coincInputPoolPct What percent of the column's receptive field is
available for potential synapses. At initialization
time, we will choose
coincInputPoolPct * (2*coincInputRadius + 1)^2
potential synapses from the receptive field.
gaussianDist: If true, the initial permanences assigned to each
column will have a gaussian distribution to them,
making the column favor inputs directly below it over
inputs farther away. If false, the initial permanences
will have a random distribution across the column's
entire potential receptive field.
commonDistributions: If set to True (the default, faster startup time),
each column will be given the same initial permanence
values.
This is normally OK when you will be training, but if
you will be sticking with the untrained network,
you will want to set this to False (which makes
startup take longer).
localAreaDensity: The desired density of active columns within a local
inhibition area (the size of which is set by the
internally calculated inhibitionRadius, which is in
turn determined from the average size of the connected
receptive fields of all columns). The inhibition logic
will insure that at most N columns remain ON within a
local inhibition area, where N = localAreaDensity *
(total number of columns in inhibition area).
numActivePerInhArea: An alternate way to control the density of the active
columns. If numActivePerInhArea is specified then
localAreaDensity must be -1, and vice versa. When
using numActivePerInhArea, the inhibition logic will
insure that at most 'numActivePerInhArea' columns
remain ON within a local inhibition area (the size of
which is set by the internally calculated
inhibitionRadius, which is in turn determined from the
average size of the connected receptive fields of all
columns). When using this method, as columns learn and
grow their effective receptive fields, the
inhibitionRadius will grow, and hence the net density
of the active columns will *decrease*. This is in
contrast to the localAreaDensity method, which keeps
the density of active columns the same regardless of
the size of their receptive fields.
stimulusThreshold: This is a number specifying the minimum number of
synapses that must be on in order for a column to
turn ON.
The purpose of this is to prevent noise input from
activating columns.
synPermInactiveDec: How much an inactive synapse is decremented, specified
as a percent of a fully grown synapse.
synPermActiveInc: How much to increase the permanence of an active
synapse, specified as a percent of a fully grown
synapse.
synPermActiveSharedDec: How much to decrease the permanence of an active
synapse which is connected to another column that is
active at the same time. Specified as a percent of a
fully grown synapse.
synPermOrphanDec: How much to decrease the permanence of an active
synapse on a column which has high overlap with the
input, but was inhibited (an "orphan" column).
synPermConnected: The default connected threshold. Any synapse whose
permanence value is above the connected threshold is
a "connected synapse", meaning it can contribute to
the cell's firing. Typical value is 0.10. Cells whose
activity level before inhibition falls below
minDutyCycleBeforeInh will have their own internal
synPermConnectedCell threshold set below this default
value.
(This concept applies to both SP and TP and so 'cells'
is correct here as opposed to 'columns')
minPctDutyCycleBeforeInh: A number between 0 and 1.0, used to set a floor on
how often a column should have at least
stimulusThreshold active inputs. Periodically, each
column looks at the duty cycle before inhibition of
all other column within its inhibition radius and sets
its own internal minimal acceptable duty cycle to:
minPctDutyCycleBeforeInh *
max(other columns' duty cycles).
On each iteration, any column whose duty cycle before
inhibition falls below this computed value will get
all of its permanence values boosted up by
synPermActiveInc. Raising all permanences in response
to a sub-par duty cycle before inhibition allows a
cell to search for new inputs when either its
previously learned inputs are no longer ever active,
or when the vast majority of them have been "hijacked"
by other columns due to the no-dupe rule.
minPctDutyCycleAfterInh: A number between 0 and 1.0, used to set a floor on
how often a column should turn ON after inhibition.
Periodically, each column looks at the duty cycle
after inhibition of all other columns within its
inhibition radius and sets its own internal minimal
acceptable duty cycle to:
minPctDutyCycleAfterInh *
max(other columns' duty cycles).
On each iteration, any column whose duty cycle after
inhibition falls below this computed value will get
its internal boost factor increased.
dutyCyclePeriod: The period used to calculate duty cycles. Higher
values make it take longer to respond to changes in
boost or synPermConnected. Shorter values make it
more unstable and likely to oscillate.
maxFiringBoost: The maximum firing level boost factor. Each column's
raw firing strength gets multiplied by a boost factor
before it gets considered for inhibition.
The actual boost factor for a column is number between
1.0 and maxFiringBoost. A boost factor of 1.0 is used
if the duty cycle is >= minDutyCycle, maxFiringBoost
is used if the duty cycle is 0, and any duty cycle in
between is linearly extrapolated from these 2
endpoints.
maxSSFiringBoost: Once a column turns ON, its boost will immediately
fall down to maxSSFiringBoost if it is above it. This
is accomplished by internally raising its computed
duty cycle accordingly. This prevents a cell which has
had its boost raised extremely high from turning ON
for too many diverse inputs in a row within a short
period of time.
maxSynPermBoost: The maximum synPermActiveInc boost factor. Each
column's synPermActiveInc gets multiplied by a boost
factor to make the column more or less likely to form
new connections.
The actual boost factor used is a number between
1.0 and maxSynPermBoost. A boost factor of 1.0 is used
if the duty cycle is >= minDutyCycle, maxSynPermBoost
is used if the duty cycle is 0, and any duty cycle
in between is linearly extrapolated from these 2
endpoints.
minDistance: This parameter impacts how finely the input space is
quantized. It is a value between 0 and 1.0. If set
to 0, then every unique input presentation will
generate a unique output representation, within the
limits of the total number of columns available.
Higher values will tend to group similar inputs
together into the same output representation. Only
columns which overlap with the input less than
100*(1.0-minDistance) percent will
have a possibility of losing the inhibition
competition against a boosted, 'bored' cell.
cloneMap: An array (numColumnsHigh, numColumnsWide) that
contains the clone index to use for each column.
numCloneMasters: The number of distinct clones in the map. This
is just outputCloningWidth*outputCloningHeight.
seed: Seed for our own pseudo-random number generator.
spVerbosity: spVerbosity level: 0, 1, 2, or 3
printPeriodicStats: If > 0, then every 'printPeriodicStats' iterations,
the SP will print to stdout some statistics related to
learning, such as the average pct under and
over-coverage, average number of active columns, etc.
in the last 'showLearningStats' iterations.
testMode: If True, run the SP in test mode. This runs both the
C++ and python implementations on all internal
functions that support both and insures that both
produce the same result.
globalInhibition: If true, enforce the
localAreaDensity/numActivePerInhArea
globally over the entire region, ignoring any
dynamically calculated inhibitionRadius. In effect,
this is the same as setting the inhibition radius to
include the entire region.
spReconstructionParam: Specifies which SP reconstruction optimization to be
used. Each column's firing strength is weighted by the
percent Overlap, permanence or duty Cycle if this
parameter is set to 'pctOverlap', 'permanence', or
'dutycycle' respectively. If parameter is set to
'maximum_firingstrength', the maximum of the firing
strengths (weighted by permanence) is used instead of
the weighted sum.
useHighTier: The "high tier" feature is to deal with sparse input
spaces. If over (1-minDistance) percent of a column's
connected synapses are active, it will automatically
become one of the winning columns.
If False, columns are activated based on their absolute
overlap with the input. Also, boosting will be
disabled to prevent pattern oscillation
randomSP: If True, the SP will not update its permanences and
will instead use its initial configuration for all
inferences.
"""
# Save our __init__ args for debugging
self._initArgsDict = _extractCallingMethodArgs()
# Handle people instantiating us directly that don't pass in a cloneMap...
# This creates a clone map without any cloning
if cloneMap is None:
cloneMap, numCloneMasters = fdru.makeCloneMap(
columnsShape=coincidencesShape,
outputCloningWidth=coincidencesShape[1],
outputCloningHeight=coincidencesShape[0]
)
self.numCloneMasters = numCloneMasters
self._cloneMapFlat = cloneMap.reshape((-1,))
# Save creation parameters
self.inputShape = int(inputShape[0]), int(inputShape[1])
self.inputBorder = inputBorder
self.inputDensity = inputDensity
self.coincidencesShape = coincidencesShape
self.coincInputRadius = coincInputRadius
self.coincInputPoolPct = coincInputPoolPct
self.gaussianDist = gaussianDist
self.commonDistributions = commonDistributions
self.localAreaDensity = localAreaDensity
self.numActivePerInhArea = numActivePerInhArea
self.stimulusThreshold = stimulusThreshold
self.synPermInactiveDec = synPermInactiveDec
self.synPermActiveInc = synPermActiveInc
self.synPermActiveSharedDec = synPermActiveSharedDec
self.synPermOrphanDec = synPermOrphanDec
self.synPermConnected = synPermConnected
self.minPctDutyCycleBeforeInh = minPctDutyCycleBeforeInh
self.minPctDutyCycleAfterInh = minPctDutyCycleAfterInh
self.dutyCyclePeriod = dutyCyclePeriod
self.requestedDutyCyclePeriod = dutyCyclePeriod
self.maxFiringBoost = maxFiringBoost
self.maxSSFiringBoost = maxSSFiringBoost
self.maxSynPermBoost = maxSynPermBoost
self.minDistance = minDistance
self.spVerbosity = spVerbosity
self.printPeriodicStats = printPeriodicStats
self.testMode = testMode
self.globalInhibition = globalInhibition
self.spReconstructionParam = spReconstructionParam
self.useHighTier= useHighTier != 0
self.randomSP = randomSP != 0
self.fileCount = 0
self._runIter = 0
# Start at iteration #0
self._iterNum = 0 # Number of learning iterations
self._inferenceIterNum = 0 # Number of inference iterations
# Print creation parameters
if spVerbosity >= 1:
self.printParams()
print "seed =", seed
# Check for errors
assert (self.numActivePerInhArea == -1 or self.localAreaDensity == -1)
assert (self.inputShape[1] > 2 * self.inputBorder)
# 1D layouts have inputShape[0] == 1
if self.inputShape[0] > 1:
assert self.inputShape[0] > 2 * self.inputBorder
# Calculate other member variables
self._coincCount = int(self.coincidencesShape[0] *
self.coincidencesShape[1])
self._inputCount = int(self.inputShape[0] * self.inputShape[1])
self._synPermMin = 0.0
self._synPermMax = 1.0
self._pylabInitialized = False
# The rate at which we bump up all synapses in response to not passing
# stimulusThreshold
self._synPermBelowStimulusInc = self.synPermConnected / 10.0
self._hasTopology = True
if self.inputShape[0] == 1: # 1-D layout
self._coincRFShape = (1, (2 * coincInputRadius + 1))
# If we only have 1 column of coincidences, then assume the user wants
# each coincidence to cover the entire input
if self.coincidencesShape[1] == 1:
assert self.inputBorder >= (self.inputShape[1] - 1) // 2
assert coincInputRadius >= (self.inputShape[1] - 1) // 2
self._coincRFShape = (1, self.inputShape[1])
self._hasTopology = False
else: # 2-D layout
self._coincRFShape = ((2*coincInputRadius + 1), (2*coincInputRadius + 1))
# This gets set to True in finishLearning. Once set, we don't allow
# learning anymore and delete all member variables needed only for
# learning.
self._doneLearning = False
# Init random seed
self._seed(seed)
# Hard-coded in the current case
self.randomTieBreakingFraction = 0.5
# The permanence values used to initialize the master coincs are from
# this initial permanence array
# The initial permanence is gaussian shaped with mean at center and variance
# carefully chosen to have connected synapses
initialPermanence = self._initialPermanence()
# masterPotentialM, masterPermanenceM and masterConnectedM are numpy arrays
# of dimensions (numCloneMasters, coincRFShape[0], coincRFShape[1])
#
# masterPotentialM: Keeps track of the potential synapses of each
# master. Potential synapses are marked as True
# masterPermanenceM: Holds the permanence values of the potential synapses.
# The values can range from 0.0 to 1.0
# masterConnectedM: Keeps track of the connected synapses of each
# master. Connected synapses are the potential synapses
# with permanence values greater than synPermConnected.
self._masterPotentialM, self._masterPermanenceM = (
self._makeMasterCoincidences(self.numCloneMasters, self._coincRFShape,
self.coincInputPoolPct, initialPermanence,
self.random))
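# Size illustration, assuming the default constructor arguments above and no
# cloning: numCloneMasters is coincidencesShape[0] * coincidencesShape[1]
# (48 * 48 = 2304 masters), and coincInputRadius = 16 gives a receptive-field
# shape of (2*16 + 1, 2*16 + 1) = (33, 33), so there is one 33x33 potential
# matrix and one 33x33 permanence matrix per clone master.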
# Update connected coincidences, the connected synapses have permanence
# values greater than synPermConnected.
self._masterConnectedM = []
dense = numpy.zeros(self._coincRFShape)
for i in xrange(self.numCloneMasters):
self._masterConnectedM.append(SM_01_32_32(dense))
# coinc sizes are used in normalizing the raw overlaps
self._masterConnectedCoincSizes = numpy.empty(self.numCloneMasters,
'uint32')
# Make one mondo coincidence matrix for all cells at once. It has one row
# per cell. The width of each row is the entire input width. There will be
# ones in each row where that cell has connections. When we have cloning,
# and we modify the connections for a clone master, we will update all
# cells that share that clone master with the new connections.
self._allConnectedM = SM_01_32_32(self._inputCount)
self._allConnectedM.resize(self._coincCount, self._inputCount)
# Initialize the dutyCycles and boost factors per clone master
self._dutyCycleBeforeInh = numpy.zeros(self.numCloneMasters,
dtype=realDType)
self._minDutyCycleBeforeInh = numpy.zeros(self.numCloneMasters,
dtype=realDType)
self._dutyCycleAfterInh = numpy.zeros(self.numCloneMasters,
dtype=realDType)
self._minDutyCycleAfterInh = numpy.zeros(self.numCloneMasters,
dtype=realDType)
# TODO: We don't need to store _boostFactors, can be calculated from duty
# cycle
self._firingBoostFactors = numpy.ones(self.numCloneMasters,
dtype=realDType)
if self.useHighTier:
self._firingBoostFactors *= maxFiringBoost
# Selectively turn on/off C++ for various methods
# TODO: Can we remove the conditional?
if self.testMode:
self._computeOverlapsImp = "py" # "py or "cpp" or "test"
self._updatePermanenceGivenInputImp = "py" # "py" or "cpp or "test"
else:
self._computeOverlapsImp = "py" # "py or "cpp" or "test"
self._updatePermanenceGivenInputImp = "py" # "py" or "cpp or "test"
# This is used to hold our learning stats (via getLearningStats())
self._learningStats = dict()
# These will hold our random state, which we return from __getstate__ and
# reseed our random number generators from in __setstate__ so that
# a saved/restored SP produces the exact same behavior as one that
# continues. This behavior allows us to write unit tests that verify that
# the behavior of an SP does not change due to saving/loading from a
# checkpoint
self._randomState = None
self._numpyRandomState = None
self._nupicRandomState = None
# Init ephemeral members
# This also calculates the slices and global inhibitionRadius and allocates
# the inhibitionObj
self._initEphemerals()
# If we have no cloning, make sure no column has potential or connected
# synapses outside the input area
if self.numCloneMasters == self._coincCount:
validMask = numpy.zeros(self._coincRFShape, dtype=realDType)
for masterNum in xrange(self._coincCount):
coincSlice = self._coincSlices[masterNum]
validMask.fill(0)
validMask[coincSlice] = 1
self._masterPotentialM[masterNum].logicalAnd(SM_01_32_32(validMask))
self._masterPermanenceM[masterNum].elementMultiply(validMask)
# Raise all permanences up until the number of connected is above
# our desired target,
self._raiseAllPermanences(masterNum,
minConnections = self.stimulusThreshold / self.inputDensity)
# Calculate the number of connected synapses in each master coincidence now
self._updateConnectedCoincidences()
def _getEphemeralMembers(self):
"""
List of our member variables that we don't need to be saved
"""
return ['_inputLayout',
'_cellsForMaster',
'_columnCenters',
#'_cellRFClipped',
'_inputSlices',
'_coincSlices',
'_activeInput',
'_permChanges',
'_dupeInput',
'_onCells',
'_masterOnCells',
'_onCellIndices',
'_inhibitionObj',
'_denseOutput',
'_overlaps',
'_anomalyScores',
'_inputUse',
'_updatePermanenceGivenInputFP',
'_computeOverlapsFP',
'_stats',
'_rfRadiusAvg',
'_rfRadiusMin',
'_rfRadiusMax',
'_topDownOut',
'_topDownParentCounts',
]
def _initEphemerals(self):
"""
Initialize all ephemeral members after being restored to a pickled state.
"""
# Used by functions which refers to inputs in absolute space
# getLearnedCM, cm,....
self._inputLayout = numpy.arange(self._inputCount,
dtype=numpy.uint32).reshape(self.inputShape)
# This array returns the list of cell indices that correspond to each master
cloningOn = (self.numCloneMasters != self._coincCount)
if cloningOn:
self._cellsForMaster = []
for masterNum in xrange(self.numCloneMasters):
self._cellsForMaster.append(
numpy.where(self._cloneMapFlat == masterNum)[0])
else:
self._cellsForMaster = None
# TODO: slices are not required for the C++ helper functions
# Figure out the slices of shaped input that each column sees...
# Figure out the valid region of each column
# The reason these slices are in initEphemerals is because numpy slices
# can't be pickled
self._setSlices()
# This holds the output of the inhibition computation - which cells are
# on after inhibition
self._onCells = numpy.zeros(self._coincCount, dtype=realDType)
self._masterOnCells = numpy.zeros(self.numCloneMasters, dtype=realDType)
self._onCellIndices = numpy.zeros(self._coincCount, dtype='uint32')
# The inhibition object gets allocated by _updateInhibitionObj() during
# the first compute and re-allocated periodically during learning
self._inhibitionObj = None
self._rfRadiusAvg = 0 # Also calculated by _updateInhibitionObj
self._rfRadiusMin = 0
self._rfRadiusMax = 0
# Used by the caller to optionally cache the dense output
self._denseOutput = None
# This holds the overlaps (in absolute number of connected synapses) of each
# coinc with input.
self._overlaps = numpy.zeros(self._coincCount, dtype=realDType)
# This holds the percent overlaps (number of active inputs / number of
# connected synapses) of each coinc with input.
self._pctOverlaps = numpy.zeros(self._coincCount, dtype=realDType)
# This is the value of the anomaly score for each column (after inhibition).
self._anomalyScores = numpy.zeros_like(self._overlaps)
# This holds the overlaps before stimulus threshold - used for verbose
# messages only.
self._overlapsBST = numpy.zeros(self._coincCount, dtype=realDType)
# This holds the number of coincs connected to an input.
if not self._doneLearning:
self._inputUse = numpy.zeros(self.inputShape, dtype=realDType)
# These are boolean matrices, the same shape as the input.
if not self._doneLearning:
self._activeInput = numpy.zeros(self.inputShape, dtype='bool')
self._dupeInput = numpy.zeros(self.inputShape, dtype='bool')
# This is used to hold self.synPermActiveInc where the input is on
# and -self.synPermInactiveDec where the input is off
if not self._doneLearning:
self._permChanges = numpy.zeros(self.inputShape, dtype=realDType)
# These are used to compute and hold the output from topDownCompute
# self._topDownOut = numpy.zeros(self.inputShape, dtype=realDType)
# self._topDownParentCounts = numpy.zeros(self.inputShape, dtype='int')
# Fill in the updatePermanenceGivenInput method pointer, which depends on
# chosen language.
if self._updatePermanenceGivenInputImp == "py":
self._updatePermanenceGivenInputFP = self._updatePermanenceGivenInputPy
elif self._updatePermanenceGivenInputImp == "cpp":
self._updatePermanenceGivenInputFP = self._updatePermanenceGivenInputCPP
elif self._updatePermanenceGivenInputImp == "test":
self._updatePermanenceGivenInputFP = self._updatePermanenceGivenInputTest
else:
assert False
# Fill in the computeOverlaps method pointer, which depends on
# chosen language.
if self._computeOverlapsImp == "py":
self._computeOverlapsFP = self._computeOverlapsPy
elif self._computeOverlapsImp == "cpp":
self._computeOverlapsFP = self._computeOverlapsCPP
elif self._computeOverlapsImp == "test":
self._computeOverlapsFP = self._computeOverlapsTest
else:
assert False
# These variables are used for keeping track of learning statistics (when
# self.printPeriodicStats is used).
self._periodicStatsCreate()
def compute(self, flatInput, learn=False, infer=True, computeAnomaly=False):
"""Compute with the current input vector.
Parameters:
----------------------------
flatInput : the input vector (a flattened 1-D numpy array)
learn : if True, adapt the permanences based on this input
infer : whether to do inference or not
computeAnomaly : if True, compute anomaly scores for the winning columns
"""
# If we are using a random SP, ignore the learn parameter
if self.randomSP:
learn = False
# If finishLearning has been called, don't allow learning anymore
if learn and self._doneLearning:
raise RuntimeError("Learning can not be performed once finishLearning"
" has been called.")
assert (learn or infer)
assert (flatInput.ndim == 1) and (flatInput.shape[0] == self._inputCount)
assert (flatInput.dtype == realDType)
input = flatInput.reshape(self.inputShape)
# Make sure we've allocated the inhibition object lazily
if self._inhibitionObj is None:
self._updateInhibitionObj()
# Reset first timer
if self.printPeriodicStats > 0 and self._iterNum == 0:
self._periodicStatsReset()
# Using cloning?
cloningOn = (self.numCloneMasters != self._coincCount)
# If we have high verbosity, save the overlaps before stimulus threshold
# so we can print them out at the end
if self.spVerbosity >= 2:
print "==============================================================="
print "Iter:%d" % self._iterNum, "inferenceIter:%d" % \
self._inferenceIterNum
self._computeOverlapsFP(input, stimulusThreshold=0)
self._overlapsBST[:] = self._overlaps
connectedCountsOnEntry = self._masterConnectedCoincSizes.copy()
if self.spVerbosity >= 3:
inputNZ = flatInput.nonzero()[0]
print "active inputs: (%d)" % len(inputNZ), inputNZ
# TODO: Port to C++, arguments may be different - t1YXArr,
# coincInputRadius,...
# Calculate the raw overlap of each cell
# Overlaps less than stimulus threshold are set to zero in
# _calculateOverlaps
# This places the result into self._overlaps
self._computeOverlapsFP(input, stimulusThreshold=self.stimulusThreshold)
# Save the original overlap values, before boosting, for the purpose of
# anomaly detection
if computeAnomaly:
self._anomalyScores[:] = self._overlaps[:]
if learn:
# Update each cell's duty cycle before inhibition
# Only cells with overlaps greater stimulus threshold are considered as
# active.
# Stimulus threshold has already been applied
# TODO: Port to C++? Loops over all coincs
# Only updating is carried out here, bump up happens later
onCellIndices = numpy.where(self._overlaps > 0)
if cloningOn:
onMasterIndices = self._cloneMapFlat[onCellIndices]
self._masterOnCells.fill(0)
self._masterOnCells[onMasterIndices] = 1
denseOn = self._masterOnCells
else:
self._onCells.fill(0)
self._onCells[onCellIndices] = 1
denseOn = self._onCells
# Setting dutyCyclePeriod to self._iterNum + 1 (capped at
# requestedDutyCyclePeriod) lets _dutyCycleBeforeInh and _dutyCycleAfterInh
# represent the real firing percentage at the beginning of learning. This
# affects boosting and lets unlearned coincidences have a high boostFactor
# early on.
self.dutyCyclePeriod = min(self._iterNum + 1,
self.requestedDutyCyclePeriod)
# Compute a moving average of the duty cycle before inhibition
self._dutyCycleBeforeInh = (
((self.dutyCyclePeriod - 1) * self._dutyCycleBeforeInh + denseOn) /
self.dutyCyclePeriod)
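# Numeric illustration (hypothetical values): with dutyCyclePeriod = 1000, a
# master whose previous dutyCycleBeforeInh was 0.05 and which is active this
# iteration (denseOn = 1) moves to (999 * 0.05 + 1) / 1000 = 0.05095, while an
# inactive master (denseOn = 0) decays to 999 * 0.05 / 1000 = 0.04995.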
# Compute firing levels based on boost factor and raw overlap. Update
# self._overlaps in place, replacing it with the boosted overlap. We also
# compute the percent overlap of each column and store it into
# self._pctOverlaps.
if cloningOn:
self._pctOverlaps[:] = self._overlaps
self._pctOverlaps /= self._masterConnectedCoincSizes[self._cloneMapFlat]
boostFactors = self._firingBoostFactors[self._cloneMapFlat]
else:
self._pctOverlaps[:] = self._overlaps
potentials = self._masterConnectedCoincSizes
self._pctOverlaps /= numpy.maximum(1, potentials)
boostFactors = self._firingBoostFactors
# To process minDistance, we do the following:
# 1.) All cells which do not overlap the input "highly" (less than
# minDistance), are considered to be in the "low tier" and get their
# overlap multiplied by their respective boost factor.
# 2.) All other cells, which DO overlap the input highly, get a "high tier
# offset" added to their overlaps, and boost is not applied. The
# "high tier offset" is computed as the max of all the boosted
# overlaps from step #1. This insures that a cell in this high tier
# will never lose to a cell from the low tier.
if self.useHighTier:
highTier = numpy.where(self._pctOverlaps >= (1.0 - self.minDistance))[0]
else:
highTier = []
someInHighTier = len(highTier) > 0
if someInHighTier:
boostFactors = numpy.array(boostFactors)
boostFactors[highTier] = 1.0
# Apply boostFactors only in learning phase not in inference phase.
if learn:
self._overlaps *= boostFactors
if someInHighTier:
highTierOffset = self._overlaps.max() + 1.0
self._overlaps[highTier] += highTierOffset
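# Numeric illustration (hypothetical values): with pctOverlaps of
# [0.95, 0.40, 0.20] and minDistance = 0.1, only the first column is in the
# high tier (0.95 >= 1.0 - 0.1). If the largest boosted overlap anywhere is
# 12.0, then highTierOffset = 13.0 is added to that column's overlap, so it
# cannot lose the inhibition competition to any low-tier column.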
# Cache the dense output for debugging.
if self._denseOutput is not None:
self._denseOutput = self._overlaps.copy()
# Incorporate inhibition and see who is firing after inhibition.
# We don't need this method to process stimulusThreshold because we
# already processed it.
# Also, we pass in a small 'addToWinners' amount which gets added to the
# winning elements as we go along. This prevents us from choosing more than
# topN winners per inhibition region when more than topN elements all have
# the same max high score.
learnedCellsOverlaps = numpy.array(self._overlaps)
if infer and not learn:
# Cells that have never learnt are not allowed to win during inhibition
if not self.randomSP:
learnedCellsOverlaps[numpy.where(self._dutyCycleAfterInh == 0)[0]] = 0
else:
# Boost the overlaps of columns that have never learned so that the
# winning columns are picked randomly from the set of unlearned
# columns during inhibition.
if self.useHighTier:
learnedCellsOverlaps[numpy.where(self._dutyCycleAfterInh == 0)[0]] = (
learnedCellsOverlaps.max() + 1)
# Boost columns that are in highTier (ie. they match the input very
# well).
learnedCellsOverlaps[highTier] += learnedCellsOverlaps.max() + 1
# Small random tiebreaker for columns with equal overlap
tieBreaker = numpy.random.rand(*learnedCellsOverlaps.shape).astype(
realDType)
learnedCellsOverlaps += 0.1 * tieBreaker
numOn = self._inhibitionObj.compute(
learnedCellsOverlaps,
self._onCellIndices,
0.0, # stimulusThreshold
max(learnedCellsOverlaps)/1000.0, # addToWinners
)
self._onCells.fill(0)
if numOn > 0:
onCellIndices = self._onCellIndices[0:numOn]
self._onCells[onCellIndices] = 1
else:
onCellIndices = []
# Compute the anomaly scores only for the winning columns.
if computeAnomaly:
self._anomalyScores *= self._onCells
self._anomalyScores *= self._dutyCycleAfterInh
if self.spVerbosity >= 2:
print "inhRadius", self._inhibitionObj.getInhibitionRadius()
print "inhLocalAreaDensity", self._inhibitionObj.getLocalAreaDensity()
print "numFiring", numOn
# Capturing learning stats? If so, capture the cell overlap statistics
if self.printPeriodicStats > 0:
activePctOverlaps = self._pctOverlaps[onCellIndices]
self._stats['cellPctOverlapSums'] += activePctOverlaps.sum()
if cloningOn:
onMasterIndices = self._cloneMapFlat[onCellIndices]
else:
onMasterIndices = onCellIndices
self._stats['cellOverlapSums'] += (
activePctOverlaps *
self._masterConnectedCoincSizes[onMasterIndices]).sum()
# Compute which cells had very high overlap, but were still
# inhibited. These we are calling our "orphan cells", because they are
# representing an input which is already better represented by another
# cell.
if self.synPermOrphanDec > 0:
orphanCellIndices = set(numpy.where(self._pctOverlaps >= 1.0)[0])
orphanCellIndices.difference_update(onCellIndices)
else:
orphanCellIndices = []
if learn:
# Update the number of coinc connections per input
# During learning (adapting permanence values), we need to be able to
# recognize dupe inputs - inputs that go to 2 or more active cells
if self.synPermActiveSharedDec != 0:
self._updateInputUse(onCellIndices)
# For the firing cells, update permanence values.
onMasterIndices = self._adaptSynapses(onCellIndices, orphanCellIndices,
input)
# Increase the permanence values of columns which haven't passed
# stimulus threshold of overlap with at least a minimum frequency
self._bumpUpWeakCoincidences()
# Update each cell's after-inhibition duty cycle
if cloningOn:
self._masterOnCells.fill(0)
self._masterOnCells[onMasterIndices] = 1
denseOn = self._masterOnCells
else:
denseOn = self._onCells
# Compute a moving average of the duty cycle after inhibition
self._dutyCycleAfterInh = ((
(self.dutyCyclePeriod - 1) * self._dutyCycleAfterInh + denseOn) /
self.dutyCyclePeriod)
# Update the boost factors based on firings rate after inhibition.
self._updateBoostFactors()
# Increment iteration number and perform our periodic tasks if it's time.
if (self._iterNum + 1) % 50 == 0:
self._updateInhibitionObj()
self._updateMinDutyCycles(
self._dutyCycleBeforeInh, self.minPctDutyCycleBeforeInh,
self._minDutyCycleBeforeInh)
self._updateMinDutyCycles(
self._dutyCycleAfterInh, self.minPctDutyCycleAfterInh,
self._minDutyCycleAfterInh)
# Next iteration
if learn:
self._iterNum += 1
if infer:
self._inferenceIterNum += 1
if learn:
# Capture and possibly print the periodic stats
if self.printPeriodicStats > 0:
self._periodicStatsComputeEnd(onCellIndices, flatInput.nonzero()[0])
# Verbose print other stats
if self.spVerbosity >= 3:
cloning = (self.numCloneMasters != self._coincCount)
print " #connected on entry: ", fdru.numpyStr(
connectedCountsOnEntry, '%d ', includeIndices=True)
print " #connected on exit: ", fdru.numpyStr(
self._masterConnectedCoincSizes, '%d ', includeIndices=True)
if self.spVerbosity >= 3 or not cloning:
print " overlaps: ", fdru.numpyStr(self._overlapsBST, '%d ',
includeIndices=True, includeZeros=False)
print " firing levels: ", fdru.numpyStr(self._overlaps, '%.4f ',
includeIndices=True, includeZeros=False)
print " on after inhibition: ", onCellIndices
if not self._doneLearning:
print " minDutyCycleBeforeInh:", fdru.numpyStr(
self._minDutyCycleBeforeInh,
'%.4f ', includeIndices=True)
print " dutyCycleBeforeInh: ", fdru.numpyStr(self._dutyCycleBeforeInh,
'%.4f ', includeIndices=True)
print " belowMinBeforeInh: " % numpy.nonzero(
self._dutyCycleBeforeInh \
< self._minDutyCycleBeforeInh)[0]
print " minDutyCycleAfterInh: ", fdru.numpyStr(
self._minDutyCycleAfterInh,
'%.4f ', includeIndices=True)
print " dutyCycleAfterInh: ", fdru.numpyStr(self._dutyCycleAfterInh,
'%.4f ', includeIndices=True)
print " belowMinAfterInh: " % numpy.nonzero(
self._dutyCycleAfterInh \
< self._minDutyCycleAfterInh)[0]
print " firingBoosts: ", fdru.numpyStr(self._firingBoostFactors,
'%.4f ', includeIndices=True)
print
elif self.spVerbosity >= 2:
print "SP: learn: ", learn
print "SP: active outputs(%d): " % (len(onCellIndices)), onCellIndices
self._runIter += 1
# Return inference result
return self._onCells
def __getstate__(self):
# Update our random states
self._randomState = random.getstate()
self._numpyRandomState = numpy.random.get_state()
self._nupicRandomState = self.random.getState()
state = self.__dict__.copy()
# Delete ephemeral members that we don't want pickled
for ephemeralMemberName in self._getEphemeralMembers():
if ephemeralMemberName in state:
del state[ephemeralMemberName]
return state
def __setstate__(self, state):
self.__dict__.update(state)
# Support older checkpoints
# These fields were added on 2010-10-05 and _iterNum was preserved
if not hasattr(self, '_randomState'):
self._randomState = random.getstate()
self._numpyRandomState = numpy.random.get_state()
self._nupicRandomState = self.random.getState()
self._iterNum = 0
# For backward compatibility
if not hasattr(self, 'requestedDutyCyclePeriod'):
self.requestedDutyCyclePeriod = 1000
# Init our random number generators
random.setstate(self._randomState)
numpy.random.set_state(self._numpyRandomState)
self.random.setState(self._nupicRandomState)
# Load things that couldn't be pickled...
self._initEphemerals()
def getAnomalyScore(self):
"""Get the aggregate anomaly score for this input pattern
Returns: A single scalar value for the anomaly score
"""
numNonzero = len(numpy.nonzero(self._anomalyScores)[0])
return 1.0 / (numpy.sum(self._anomalyScores) + 1)
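# For illustration: the aggregate score is the reciprocal of 1 plus the sum
# of the duty-cycle-weighted overlaps of the winning columns. If no winning
# column overlapped the input, the score is 1.0 (maximally anomalous); a
# summed weighted overlap of 9.0 gives 1.0 / (9.0 + 1) = 0.1. (numNonzero
# above is computed but not used in this formula.)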
def getLearningStats(self):
"""Return a dictionary containing a set of statistics related to learning.
Here is a list of what is returned:
'activeCountAvg':
The average number of active columns seen over the last
N training iterations, where N is set by the constructor parameter
printPeriodicStats.
If printPeriodicStats is not turned on (== 0), then this is -1
'underCoveragePct':
The average under-coverage of the input as seen over the last N training
iterations, where N is set by the constructor parameter
printPeriodicStats.
If printPeriodicStats is not turned on (== 0), then this is -1
'overCoveragePct':
The average over-coverage of the input as seen over the last N training
iterations, where N is set by the constructor parameter
printPeriodicStats.
If printPeriodicStats is not turned on (== 0), then this is -1
'numConnectionChangesAvg':
The overall average number of connection changes made per active
column per iteration, over the last N training iterations, where N
is set by the constructor parameter printPeriodicStats. This gives an
indication as to how much learning is still occurring.
If printPeriodicStats is not turned on (== 0), then this is -1
'numConnectionChangesMin':
The minimum number of connection changes made to an active column per
iteration, over the last N training iterations, where N is set by the
constructor parameter printPeriodicStats. This gives an indication as
to how much learning is still occurring.
If printPeriodicStats is not turned on (== 0), then this is -1
'numConnectionChangesMax':
The maximum number of connection changes made to an active column per
iteration, over the last N training iterations, where N is set by the
constructor parameter printPeriodicStats. This gives an indication as
to how much learning is still occurring.
If printPeriodicStats is not turned on (== 0), then this is -1
'rfSize':
The average receptive field size of the columns.
'inhibitionRadius':
The average inhibition radius of the columns.
'targetDensityPct':
The most recent target local area density used, as a percent (0 -> 100)
'coincidenceSizeAvg':
The average learned coincidence size
'coincidenceSizeMin':
The minimum learned coincidence size
'coincidenceSizeMax':
The maximum learned coincidence size
'coincidenceSizeSum':
The sum of all coincidence sizes (total number of connected synapses)
'dcBeforeInhibitionAvg':
The average duty cycle before inhibition of all coincidences
'dcBeforeInhibitionMin':
The minimum duty cycle before inhibition of all coincidences
'dcBeforeInhibitionMax':
The maximum duty cycle before inhibition of all coincidences
'dcAfterInhibitionAvg':
The average duty cycle after inhibition of all coincidences
'dcAfterInhibitionMin':
The minimum duty cycle after inhibition of all coincidences
'dcAfterInhibitionMax':
The maximum duty cycle after inhibition of all coincidences
'firingBoostAvg':
The average firing boost
'firingBoostMin':
The minimum firing boost
'firingBoostMax':
The maximum firing boost
"""
# Fill in the stats that can be computed on the fly. The transient stats
# that depend on printPeriodicStats being on, have already been stored
self._learningStats['rfRadiusAvg'] = self._rfRadiusAvg
self._learningStats['rfRadiusMin'] = self._rfRadiusMin
self._learningStats['rfRadiusMax'] = self._rfRadiusMax
if self._inhibitionObj is not None:
self._learningStats['inhibitionRadius'] = (
self._inhibitionObj.getInhibitionRadius())
self._learningStats['targetDensityPct'] = (
100.0 * self._inhibitionObj.getLocalAreaDensity())
else:
print "Warning: No inhibitionObj found for getLearningStats"
self._learningStats['inhibitionRadius'] = 0.0
self._learningStats['targetDensityPct'] = 0.0
self._learningStats['coincidenceSizeAvg'] = (
self._masterConnectedCoincSizes.mean())
self._learningStats['coincidenceSizeMin'] = (
self._masterConnectedCoincSizes.min())
self._learningStats['coincidenceSizeMax'] = (
self._masterConnectedCoincSizes.max())
self._learningStats['coincidenceSizeSum'] = (
self._masterConnectedCoincSizes.sum())
if not self._doneLearning:
self._learningStats['dcBeforeInhibitionAvg'] = (
self._dutyCycleBeforeInh.mean())
self._learningStats['dcBeforeInhibitionMin'] = (
self._dutyCycleBeforeInh.min())
self._learningStats['dcBeforeInhibitionMax'] = (
self._dutyCycleBeforeInh.max())
self._learningStats['dcAfterInhibitionAvg'] = (
self._dutyCycleAfterInh.mean())
self._learningStats['dcAfterInhibitionMin'] = (
self._dutyCycleAfterInh.min())
self._learningStats['dcAfterInhibitionMax'] = (
self._dutyCycleAfterInh.max())
self._learningStats['firingBoostAvg'] = self._firingBoostFactors.mean()
self._learningStats['firingBoostMin'] = self._firingBoostFactors.min()
self._learningStats['firingBoostMax'] = self._firingBoostFactors.max()
return self._learningStats
def resetStats(self):
"""Reset the stats (periodic, ???). This will usually be called by
user code at the start of each inference run (for a particular data set).
TODO: which other stats need to be reset? Learning stats?
"""
self._periodicStatsReset()
def _seed(self, seed=-1):
"""
Initialize the random seed
"""
if seed != -1:
self.random = NupicRandom(seed)
random.seed(seed)
numpy.random.seed(seed)
else:
self.random = NupicRandom()
def _initialPermanence(self):
"""Create and return a 2D matrix filled with initial permanence values.
The returned matrix will be of shape:
(2*coincInputRadius + 1, 2*coincInputRadius + 1).
The initial permanence values are set between 0 and 1.0, with enough chosen
above synPermConnected to make it highly likely that a cell will pass
stimulusThreshold, given the size of the potential RF, the input pool
sampling percentage, and the expected density of the active inputs.
If gaussianDist is True, the center of the matrix will contain the highest
permanence values and lower values will be farther from the center.
If gaussianDist is False, the highest permanence values will be evenly
distributed throughout the potential RF.
"""
# Figure out the target number of connected synapses. We want about 2X
# stimulusThreshold
minOn = 2 * max(self.stimulusThreshold, 10) / self.coincInputPoolPct \
/ self.inputDensity
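    # For intuition, with assumed example values (not this class's defaults):
    # stimulusThreshold=10, coincInputPoolPct=0.5 and inputDensity=0.1 would
    # give minOn = 2 * 10 / 0.5 / 0.1 = 400 potential synapses, so that
    # roughly 2X stimulusThreshold of them end up connected after sub-sampling
    # for the pool percentage and accounting for input sparsity.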
# Get the gaussian distribution, with max magnitude just slightly above
# synPermConnected. Try to find a sigma that gives us about 2X
# stimulusThreshold connected synapses after sub-sampling for
# coincInputPoolPct. We will assume everything within +/- sigma will be
# connected. This logic uses the fact that an x value of sigma generates a
# magnitude of 0.6.
if self.gaussianDist:
# Only supported when we have 2D layouts
if self._coincRFShape[0] != self._coincRFShape[1]:
raise RuntimeError("Gaussian distibuted permanences are currently only"
"supported for 2-D layouts")
# The width and height of the center "blob" in inputs is the square root
# of the area
onAreaDim = numpy.sqrt(minOn)
# Sigma is at the edge of the center blob
sigma = onAreaDim/2
# Create the gaussian with a value of 1.0 at the center
perms = self._gaussianMatrix(dim=max(self._coincRFShape), sigma=sigma)
# The distance between the min and max values within the gaussian will
# be given by 'grange'. In a gaussian, the value at sigma away from the
# center is 0.6 * the value at the center. We want the values at sigma
# to be synPermConnected
maxValue = 1.0 / 0.6 * self.synPermConnected
perms *= maxValue
perms.shape = (-1,)
# Now, let's clip off the low values to reduce the number of non-zeros
# we have and reduce our memory requirements. We'll clip everything
# farther away than 2 sigma to 0. The value of a gaussian at 2 sigma
# is 0.135 * the value at the center
perms[perms < (0.135 * maxValue)] = 0
# Evenly distribute the permanences through the RF
else:
# Create a random distribution from 0 to 1.
perms = numpy.random.random(self._coincRFShape)
perms = perms.astype(realDType)
# Set the range of values to be between 0 and
# synPermConnected+synPermInactiveDec. This ensures that a pattern
# will always be learned in 1 iteration
maxValue = min(1.0, self.synPermConnected + self.synPermInactiveDec)
# What percentage do we want to be connected?
connectPct = 0.50
# What value from the 0 to 1 distribution will map to synPermConnected?
threshold = 1.0 - connectPct
# Which will be the connected and unconnected synapses?
connectedSyns = perms >= threshold
unconnectedSyns = numpy.logical_not(connectedSyns)
# Squeeze all values between threshold and 1.0 to be between
# synPermConnected and synPermConnected + synPermActiveInc / 4
# This makes sure that the permanences of a firing coincidence which match
# active input bits rise above synPermConnected, while the remaining
# unconnected synapses become disconnected, within one firing learning
# iteration.
srcOffset = threshold
srcRange = 1.0 - threshold
dstOffset = self.synPermConnected
dstRange = maxValue - self.synPermConnected
perms[connectedSyns] = (perms[connectedSyns] - srcOffset)/srcRange \
* dstRange / 4.0 + dstOffset
# Squeeze all values between 0 and threshold to be between 0 and
# synPermConnected
srcRange = threshold - 0.0
dstRange = self.synPermConnected - 0.0
perms[unconnectedSyns] = perms[unconnectedSyns]/srcRange \
* dstRange
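      # A small worked example with assumed values (synPermConnected=0.2,
      # synPermInactiveDec=0.02, so maxValue=0.22 and threshold=0.5): a raw
      # value of 0.75 maps to (0.75-0.5)/0.5 * (0.22-0.2)/4 + 0.2 = 0.2025
      # (connected), while a raw value of 0.3 maps to 0.3/0.5 * 0.2 = 0.12
      # (unconnected).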
# Now, let's clip off the low values to reduce the number of non-zeros
# we have and reduce our memory requirements. We'll clip everything
# below synPermActiveInc/2 to 0
perms[perms < (self.synPermActiveInc / 2.0)] = 0
perms.shape = (-1,)
return perms
def _gaussianMatrix(self, dim, sigma):
"""
Create and return a 2D matrix filled with a gaussian distribution. The
returned matrix will be of shape (dim, dim). The mean of the gaussian
will be in the center of the matrix and have a value of 1.0.
"""
gaussian = lambda x, sigma: numpy.exp(-(x**2) / (2*(sigma**2)))
# Allocate the matrix
m = numpy.empty((dim, dim), dtype=realDType)
# Find the center
center = (dim - 1) / 2.0
# TODO: Simplify using numpy.meshgrid
# Fill it in
for y in xrange(dim):
for x in xrange(dim):
dist = numpy.sqrt((x-center)**2 + (y-center)**2)
m[y,x] = gaussian(dist, sigma)
return m
def _makeMasterCoincidences(self, numCloneMasters, coincRFShape,
coincInputPoolPct, initialPermanence=None,
nupicRandom=None):
"""Make the master coincidence matrices and mater input histograms.
# TODO: Update this example
>>> FDRCSpatial._makeMasterCoincidences(1, 2, 0.33)
(array([[[ True, True, False, False, False],
[False, True, False, False, True],
[False, True, False, False, False],
[False, False, False, True, False],
[ True, False, False, False, False]]], dtype=bool), array([[[ 0.26982325, 0.19995725, 0. , 0. , 0. ],
[ 0. , 0.94128972, 0. , 0. , 0.36316112],
[ 0. , 0.06312726, 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0.29740077, 0. ],
[ 0.81071907, 0. , 0. , 0. , 0. ]]], dtype=float32))
"""
if nupicRandom is None:
nupicRandom = NupicRandom(42)
if initialPermanence is None:
initialPermanence = self._initialPermanence()
coincRfArea = (coincRFShape[0] * coincRFShape[1])
coincInputPool = coincInputPoolPct * coincRfArea
# We will generate a list of sparse matrices
masterPotentialM = []
masterPermanenceM = []
toSample = numpy.arange(coincRfArea, dtype='uint32')
toUse = numpy.empty(coincInputPool, dtype='uint32')
denseM = numpy.zeros(coincRfArea, dtype=realDType)
for i in xrange(numCloneMasters):
nupicRandom.getUInt32Sample(toSample, toUse)
# Put in 1's into the potential locations
denseM.fill(0)
denseM[toUse] = 1
masterPotentialM.append(SM_01_32_32(denseM.reshape(coincRFShape)))
# Put in the initial permanences
denseM *= initialPermanence
masterPermanenceM.append(SM32(denseM.reshape(coincRFShape)))
# If we are not using common initial permanences, create another
# unique one for the next cell
if not self.commonDistributions:
initialPermanence = self._initialPermanence()
return masterPotentialM, masterPermanenceM
def _updateConnectedCoincidences(self, masters=None):
"""Update 'connected' version of the given coincidence.
Each 'connected' coincidence is effectively a binary (boolean) matrix of the
same size as the input histogram matrices. It has a 1 wherever the
corresponding permanence is at or above synPermConnected.
"""
# If no masterNum given, update all of them
if masters is None:
masters = xrange(self.numCloneMasters)
nCellRows, nCellCols = self._coincRFShape
cloningOn = (self.numCloneMasters != self._coincCount)
for masterNum in masters:
# Where are we connected?
masterConnectedNZ = (
self._masterPermanenceM[masterNum].whereGreaterEqual(
0, nCellRows, 0, nCellCols, self.synPermConnected))
rowIdxs = masterConnectedNZ[:,0]
colIdxs = masterConnectedNZ[:,1]
self._masterConnectedM[masterNum].setAllNonZeros(
nCellRows, nCellCols, rowIdxs, colIdxs)
self._masterConnectedCoincSizes[masterNum] = len(rowIdxs)
# Update the corresponding rows in the super, mondo connected matrix that
# come from this master
masterConnected = (
self._masterConnectedM[masterNum].toDense().astype('bool')) # 0.2s
if cloningOn:
cells = self._cellsForMaster[masterNum]
else:
cells = [masterNum]
for cell in cells:
inputSlice = self._inputSlices[cell]
coincSlice = self._coincSlices[cell]
masterSubset = masterConnected[coincSlice]
sparseCols = self._inputLayout[inputSlice][masterSubset]
self._allConnectedM.replaceSparseRow(cell, sparseCols) # 4s.
def _setSlices(self):
"""Compute self._columnSlices and self._inputSlices
self._inputSlices are used to index into the input (assuming it's been
shaped to a 2D array) to get the receptive field of each column. There
is one item in the list for each column.
self._coincSlices are used to index into the coinc (assuming it's been
shaped to a 2D array) to get the valid area of the column. There
is one item in the list for each column.
This function is called upon unpickling, since we can't pickle slices.
"""
self._columnCenters = numpy.array(self._computeCoincCenters(
self.inputShape, self.coincidencesShape, self.inputBorder))
coincInputRadius = self.coincInputRadius
coincHeight, coincWidth = self._coincRFShape
inputShape = self.inputShape
inputBorder = self.inputBorder
# Compute the input slices for each cell. This is the slice of the entire
# input which intersects with the cell's permanence matrix.
if self._hasTopology:
self._inputSlices = [
numpy.s_[max(0, cy-coincInputRadius):
min(inputShape[0], cy+coincInputRadius + 1),
max(0, cx-coincInputRadius):
min(inputShape[1], cx+coincInputRadius + 1)]
for (cy, cx) in self._columnCenters]
else:
self._inputSlices = [numpy.s_[0:inputShape[0], 0:inputShape[1]]
for (cy, cx) in self._columnCenters]
self._inputSlices2 = numpy.zeros(4 * len(self._inputSlices),
dtype="uint32")
k = 0
for i in range(len(self._inputSlices)):
self._inputSlices2[k] = self._inputSlices[i][0].start
self._inputSlices2[k + 1] = self._inputSlices[i][0].stop
self._inputSlices2[k + 2] = self._inputSlices[i][1].start
self._inputSlices2[k + 3] = self._inputSlices[i][1].stop
k = k + 4
# Compute the coinc slices for each cell. This is which portion of the
# cell's permanence matrix intersects with the input.
if self._hasTopology:
if self.inputShape[0] > 1:
self._coincSlices = [
numpy.s_[max(0, coincInputRadius - cy):
min(coincHeight, coincInputRadius + inputShape[0] - cy),
max(0, coincInputRadius-cx):
min(coincWidth, coincInputRadius + inputShape[1] - cx)]
for (cy, cx) in self._columnCenters]
else:
self._coincSlices = [
numpy.s_[0:1,
max(0, coincInputRadius-cx):
min(coincWidth, coincInputRadius + inputShape[1] - cx)]
for (cy, cx) in self._columnCenters]
else:
self._coincSlices = [numpy.s_[0:coincHeight, 0:coincWidth]
for (cy, cx) in self._columnCenters]
self._coincSlices2 = numpy.zeros((4*len(self._coincSlices)), dtype="uint32")
k = 0
for i in range(len(self._coincSlices)):
self._coincSlices2[k] = self._coincSlices[i][0].start
self._coincSlices2[k + 1] = self._coincSlices[i][0].stop
self._coincSlices2[k + 2] = self._coincSlices[i][1].start
self._coincSlices2[k + 3] = self._coincSlices[i][1].stop
k = k + 4
@staticmethod
def _computeCoincCenters(inputShape, coincidencesShape, inputBorder):
"""Compute the centers of all coincidences, given parameters.
This function is semi-public: tools may use it to generate good
visualizations of what the FDRCSpatial node is doing.
NOTE: It must be static or global function so that it can be called by
the ColumnActivityTab inspector *before* the first compute (before the
SP has been constructed).
If the input shape is (7,20), shown below with * for each input.
********************
********************
********************
********************
********************
********************
********************
If inputBorder is 1, we distribute the coincidences evenly over the
area after removing the edges; @ shows the allowed input area below.
********************
*@@@@@@@@@@@@@@@@@@*
*@@@@@@@@@@@@@@@@@@*
*@@@@@@@@@@@@@@@@@@*
*@@@@@@@@@@@@@@@@@@*
*@@@@@@@@@@@@@@@@@@*
********************
Each coincidence is centered at the closest @ and looks at an area of
radius coincInputRadius in the input level below it.
This function call returns an iterator over the coincidence centers. Each
element in iterator is a tuple: (y, x). The iterator returns elements in a
fixed order.
"""
# Determine Y centers
if inputShape[0] > 1: # 2-D layout
startHeight = inputBorder
stopHeight = inputShape[0] - inputBorder
else:
startHeight = stopHeight = 0
heightCenters = numpy.linspace(startHeight,
stopHeight,
coincidencesShape[0],
endpoint=False).astype('int32')
# Determine X centers
startWidth = inputBorder
stopWidth = inputShape[1] - inputBorder
widthCenters = numpy.linspace(startWidth,
stopWidth,
coincidencesShape[1],
endpoint=False).astype('int32')
return list(cross(heightCenters, widthCenters))
def _updateInhibitionObj(self):
"""
Calculate the average inhibitionRadius to use and update the inhibition
object accordingly. This looks at the size of the average connected
receptive field and uses that to determine the inhibition radius.
"""
# Compute the inhibition radius.
# If using global inhibition, just set it to include the entire region
if self.globalInhibition:
avgRadius = max(self.coincidencesShape)
# Else, set it based on the average size of the connected synapses area in
# each cell.
else:
totalDim = 0
# Get the dimensions of the connected receptive fields of each cell to
# compute the average
minDim = numpy.inf
maxDim = 0
for masterNum in xrange(self.numCloneMasters):
masterConnected = self._masterConnectedM[masterNum]
nzs = masterConnected.getAllNonZeros()
rows, cols = zip(*nzs)
rows = numpy.array(rows)
cols = numpy.array(cols)
if len(rows) >= 2:
height = rows.max() - rows.min() + 1
else:
height = 1
if len(cols) >= 2:
width = cols.max() - cols.min() + 1
else:
width = 1
avgDim = (height + width) / 2.0
minDim = min(minDim, avgDim)
maxDim = max(maxDim, avgDim)
totalDim += avgDim
# Get average width/height in input space
avgDim = totalDim / self.numCloneMasters
self._rfRadiusAvg = (avgDim - 1.0) / 2.0
self._rfRadiusMin = (minDim - 1.0) / 2.0
self._rfRadiusMax = (maxDim - 1.0) / 2.0
# How many columns in cell space does it correspond to?
if self.inputShape[0] > 1: # 2-D layout
coincsPerInputX = (float(self.coincidencesShape[1]) /
(self.inputShape[1] - 2 * self.inputBorder))
coincsPerInputY = (float(self.coincidencesShape[0]) /
(self.inputShape[0] - 2 * self.inputBorder))
else:
coincsPerInputX = coincsPerInputY = (
float(self.coincidencesShape[1] * self.coincidencesShape[0]) /
(self.inputShape[1] - 2 * self.inputBorder))
avgDim *= (coincsPerInputX + coincsPerInputY) / 2
avgRadius = (avgDim - 1.0) / 2.0
avgRadius = max(1.0, avgRadius)
# Can't be greater than the overall width or height of the level
maxDim = max(self.coincidencesShape)
avgRadius = min(avgRadius, maxDim)
avgRadius = int(round(avgRadius))
# Is there a need to re-instantiate the inhibition object?
if (self._inhibitionObj is None or
self._inhibitionObj.getInhibitionRadius() != avgRadius):
# What is our target density?
if self.localAreaDensity > 0:
localAreaDensity = self.localAreaDensity
else:
numCellsPerInhArea = (avgRadius * 2.0 + 1.0) ** 2
totalCells = self.coincidencesShape[0] * self.coincidencesShape[1]
numCellsPerInhArea = min(numCellsPerInhArea, totalCells)
localAreaDensity = float(self.numActivePerInhArea) / numCellsPerInhArea
# Don't let it be greater than 0.50
localAreaDensity = min(localAreaDensity, 0.50)
if self.spVerbosity >= 2:
print "Updating inhibition object:"
print " avg. rfRadius:", self._rfRadiusAvg
print " avg. inhRadius:", avgRadius
print " Setting density to:", localAreaDensity
self._inhibitionObj = Inhibition2(self.coincidencesShape[0], # height
self.coincidencesShape[1], # width
avgRadius, # inhRadius
localAreaDensity) # density
def _updateMinDutyCycles(self, actDutyCycles, minPctDutyCycle, minDutyCycles):
"""
Calculate and update the minimum acceptable duty cycle for each cell based
on the duty cycles of the cells within its inhibition radius and the
minPctDutyCycle.
Parameters:
-----------------------------------------------------------------------
actDutyCycles: The actual duty cycles of all cells
minPctDutyCycle: Each cell's minimum duty cycle will be set to
minPctDutyCycle times the duty cycle of the most active
cell within its inhibition radius
minDutyCycles: This array will be updated in place with the new minimum
acceptable duty cycles
"""
# What is the inhibition radius?
inhRadius = self._inhibitionObj.getInhibitionRadius()
# Reshape the actDutyCycles to match the topology of the level
cloningOn = (self.numCloneMasters != self._coincCount)
if not cloningOn:
actDutyCycles = actDutyCycles.reshape(self.coincidencesShape)
minDutyCycles = minDutyCycles.reshape(self.coincidencesShape)
# Special, faster handling when inhibition radius includes the entire
# set of cells.
if cloningOn or inhRadius >= max(self.coincidencesShape):
minDutyCycle = minPctDutyCycle * actDutyCycles.max()
minDutyCycles.fill(minDutyCycle)
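      # e.g. with an assumed minPctDutyCycle of 0.001 and a maximum observed
      # duty cycle of 0.2, every cell gets the same floor of 0.0002.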
# Else, process each cell
else:
(numRows, numCols) = self.coincidencesShape
for row in xrange(numRows):
top = max(0, row - inhRadius)
bottom = min(row + inhRadius + 1, numRows)
for col in xrange(numCols):
left = max(0, col - inhRadius)
right = min(col + inhRadius + 1, numCols)
maxDutyCycle = actDutyCycles[top:bottom, left:right].max()
minDutyCycles[row, col] = maxDutyCycle * minPctDutyCycle
if self.spVerbosity >= 2:
print "Actual duty cycles:"
print fdru.numpyStr(actDutyCycles, '%.4f')
print "Recomputed min duty cycles, using inhRadius of", inhRadius
print fdru.numpyStr(minDutyCycles, '%.4f')
def _computeOverlapsPy(self, inputShaped, stimulusThreshold):
"""
Computes overlaps for every column for the current input in place. The
overlaps less than stimulus threshold are set to zero here.
For columns with input RF going off the edge of input field, only regions
within the input field are considered. This is equivalent to padding the
input field with zeros.
Parameters:
------------------------------------------------------------------------
inputShaped: input at the current time step, shaped to the input
topology
stimulusThreshold: stimulusThreshold to use
Member variables used/updated:
------------------------------------------------------------------------
_inputSlices: Index into the input (assuming it's been shaped to a 2D
array) to get the receptive field of each column.
_coincSlices: Index into the coinc (assuming it's been shaped to a 2D
array) to get the valid region of each column.
_overlaps: Result is placed into this array which holds the overlaps of
each column with the input
"""
flatInput = inputShaped.reshape(-1)
self._allConnectedM.rightVecSumAtNZ_fast(flatInput, self._overlaps)
# Apply stimulusThreshold
# TODO: Is there a faster numpy operation for this?
self._overlaps[self._overlaps < stimulusThreshold] = 0
self._overlapsNoBoost = self._overlaps.copy()
def _computeOverlapsCPP(self, inputShaped, stimulusThreshold):
"""
Same as _computeOverlapsPy, but using a C++ implementation.
"""
cpp_overlap(self._cloneMapFlat,
self._inputSlices2, self._coincSlices2,
inputShaped, self._masterConnectedM,
stimulusThreshold,
self._overlaps)
def _computeOverlapsTest(self, inputShaped, stimulusThreshold):
"""
Same as _computeOverlapsPy, but compares the python and C++
implementations.
"""
# Py version
self._computeOverlapsPy(inputShaped, stimulusThreshold)
overlaps2 = copy.deepcopy(self._overlaps)
# C++ version
self._computeOverlapsCPP(inputShaped, stimulusThreshold)
if (abs(self._overlaps - overlaps2) > 1e-6).any():
print self._overlaps, overlaps2, abs(self._overlaps - overlaps2)
import pdb; pdb.set_trace()
sys.exit(0)
def _raiseAllPermanences(self, masterNum, minConnections=None,
densePerm=None, densePotential=None):
"""
Raise all permanences of the given master. If minConnections is given, the
permanences will be raised until at least minConnections of them are
connected strength.
If minConnections is left at None, all permanences will be raised by
self._synPermBelowStimulusInc.
After raising all permanences, we also "sparsify" the permanence matrix
and set to 0 any permanences which are already very close to 0, this
keeps the memory requirements of the sparse matrices used to store
the permanences lower.
Parameters:
----------------------------------------------------------------------------
masterNum: Which master to bump up
minConnections: Desired number of connected synapses to have
If None, then all permanences are simply bumped up
by self._synPermBelowStimulusInc
densePerm: The dense representation of the master's permanence
matrix, if available. If not specified, we will
create this from the stored sparse representation.
Providing this will avoid some compute overhead.
If provided, it is assumed that it is more recent
than the stored sparse matrix. The stored sparse
matrix will ALWAYS be updated from the densePerm if
the densePerm is provided.
densePotential: The dense representation of the master's potential
synapses matrix, if available. If not specified, we
will create this from the stored sparse potential
matrix.
Providing this will avoid some compute overhead.
If provided, it is assumed that it is more recent
than the stored sparse matrix.
retval: (modified, numConnections)
modified: True if any permanences were raised
numConnections: Number of actual connected synapses
(not computed if minConnections was
None, so None is returned in that
case.)
"""
# It's faster to perform this operation on the dense matrices and
# then convert to sparse once we're done since we will be potentially
# introducing and then later removing a bunch of non-zeros.
# Get references to the sparse perms and potential syns for this master
sparsePerm = self._masterPermanenceM[masterNum]
sparsePotential = self._masterPotentialM[masterNum]
# We will trim off all synapse permanences below this value to 0 in order
# to keep the memory requirements of the SparseMatrix lower
trimThreshold = self.synPermActiveInc / 2.0
# See if we already have the required number of connections. If we don't,
# get the dense form of the permanences if we don't have them already
if densePerm is None:
# See if we already have enough connections, if so, we can avoid the
# overhead of converting to dense
if minConnections is not None:
numConnected = sparsePerm.countWhereGreaterEqual(
0, self._coincRFShape[0], 0, self._coincRFShape[1],
self.synPermConnected)
if numConnected >= minConnections:
return (False, numConnected)
densePerm = self._masterPermanenceM[masterNum].toDense()
elif minConnections is not None:
numConnected = count_gte(densePerm.reshape(-1), self.synPermConnected)
if numConnected >= minConnections:
sparsePerm.fromDense(densePerm)
sparsePerm.threshold(trimThreshold)
return (False, numConnected)
# Get the dense form of the potential synapse locations
if densePotential is None:
densePotential = self._masterPotentialM[masterNum].toDense()
# Form the array with the increments
incrementM = densePotential.astype(realDType)
incrementM *= self._synPermBelowStimulusInc
# Increment until we reach our target number of connections
assert (densePerm.dtype == realDType)
while True:
densePerm += incrementM
if minConnections is None:
numConnected = None
break
numConnected = count_gte(densePerm.reshape(-1), self.synPermConnected)
if numConnected >= minConnections:
break
# Convert back to sparse form and trim any values that are already
# close to zero
sparsePerm.fromDense(densePerm)
sparsePerm.threshold(trimThreshold)
return (True, numConnected)
def _bumpUpWeakCoincidences(self):
"""
This bump-up ensures that every coincidence has non-zero connections. We find
all coincidences whose duty cycle before inhibition has fallen below the
minimum acceptable duty cycle and raise all of their synapse permanences.
Repeated over time, this leads to synapses crossing the synPermConnected
threshold.
"""
# Update each cell's connected threshold based on the duty cycle before
# inhibition. The connected threshold is linearly interpolated
# between the points (dutyCycle:0, thresh:0) and (dutyCycle:minDuty,
# thresh:synPermConnected). This is a line defined as: y = mx + b
# thresh = synPermConnected/minDuty * dutyCycle
bumpUpList = (
self._dutyCycleBeforeInh < self._minDutyCycleBeforeInh).nonzero()[0]
for master in bumpUpList:
self._raiseAllPermanences(master)
# Update the connected synapses for each master we touched.
self._updateConnectedCoincidences(bumpUpList)
if self.spVerbosity >= 2 and len(bumpUpList) > 0:
print ("Bumping up permanences in following cells due to falling below"
"minDutyCycleBeforeInh:"), bumpUpList
def _updateBoostFactors(self):
"""
Update the boost factors. The boost factors are linearly interpolated
between the points (dutyCycle:0, boost:maxFiringBoost) and
(dutyCycle:minDuty, boost:1.0). This is a line defined as: y = mx + b
boost = (1-maxFiringBoost)/minDuty * dutyCycle + maxFiringBoost
Member variables updated:
------------------------------------------------------------------------
_firingBoostFactors: numpy array of boost factors, defined per master
"""
if self._minDutyCycleAfterInh.sum() > 0:
self._firingBoostFactors = (
(1 - self.maxFiringBoost) /
self._minDutyCycleAfterInh * self._dutyCycleAfterInh +
self.maxFiringBoost)
self._firingBoostFactors[self._dutyCycleAfterInh >
self._minDutyCycleAfterInh] = 1.0
def _updateInputUse(self, onCellIndices):
"""
During learning (adapting permanence values), we need to be able to tell
which inputs are going to 2 or more active cells at once.
We step through each coinc and mark all the inputs it is connected to. The
inputUse array acts as a counter for the number of connections to the coincs
from each input.
Member variables updated:
------------------------------------------------------------------------
_inputUse: numpy array of the number of connected coincs on each input
"""
allConnected = SM32(self._allConnectedM)
# TODO: avoid this copy
self._inputUse[:] = allConnected.addListOfRows(
onCellIndices).reshape(self.inputShape)
def _adaptSynapses(self, onCellIndices, orphanCellIndices, input):
"""
This is the main function in learning of SP. The permanence values are
changed based on the learning rules.
Parameters:
------------------------------------------------------------------------
onCellIndices: columns which are turned on after inhibition. The
permanence values of these coincs are adapted based on the
input.
orphanCellIndices: columns which had very high overlap with the input, but
ended up being inhibited
input: Input, shaped to the input topology
retval: list of masterCellIndices that were actually updated, or
None if cloning is off
"""
# Capturing learning stats?
if self.printPeriodicStats > 0:
self._stats['explainedInputsCurIteration'] = set()
# Precompute the active, inactive, and dupe inputs up front for speed
# TODO: put these into pre-allocated arrays for speed
self._activeInput[:] = input
# Create a matrix containing the default permanence deltas for each input
self._permChanges.fill(-1 * self.synPermInactiveDec)
self._permChanges[self._activeInput] = self.synPermActiveInc
if self.synPermActiveSharedDec != 0:
numpy.logical_and(self._activeInput, self._inputUse>1, self._dupeInput)
self._permChanges[self._dupeInput] -= self.synPermActiveSharedDec
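    # For example, with assumed increments synPermActiveInc=0.05,
    # synPermInactiveDec=0.01 and synPermActiveSharedDec=0.02: inactive inputs
    # get a delta of -0.01, active inputs +0.05, and active inputs that feed
    # two or more connected cells end up with +0.05 - 0.02 = +0.03.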
# Cloning? If so, scramble the onCells so that we pick a random one to
# update for each master. We only update a master cell at most one time
# per input presentation.
cloningOn = (self.numCloneMasters != self._coincCount)
if cloningOn:
# Scramble the onCellIndices so that we pick a random one to update
onCellIndices = list(onCellIndices)
random.shuffle(onCellIndices)
visitedMasters = set()
# For the firing cells, update permanence values
for columnNum in itertools.chain(onCellIndices, orphanCellIndices):
# Get the master number
masterNum = self._cloneMapFlat[columnNum]
# If cloning, only visit each master once
if cloningOn:
if masterNum in visitedMasters:
continue
visitedMasters.add(masterNum)
# Get the slices of input that overlap with the valid area of this master
inputSlice = self._inputSlices[columnNum]
rfActiveInput = self._activeInput[inputSlice]
rfPermChanges = self._permChanges[inputSlice]
# Get the potential synapses, permanence values, and connected synapses
# for this master
masterPotential = self._masterPotentialM[masterNum].toDense()
masterPermanence = self._masterPermanenceM[masterNum].toDense()
masterConnected = (
self._masterConnectedM[masterNum].toDense().astype('bool'))
# Make changes only over the areas that overlap the input level. For
# coincidences near the edge of the level for example, this excludes the
# synapses outside the edge.
coincSlice = self._coincSlices[columnNum]
masterValidPermanence= masterPermanence[coincSlice]
# Capturing learning stats?
if self.printPeriodicStats > 0:
masterValidConnected = masterConnected[coincSlice]
explainedInputs = self._inputLayout[inputSlice][masterValidConnected]
self._stats['explainedInputsCurIteration'].update(explainedInputs)
if self.spVerbosity >= 4:
print " adapting cell:%d [%d:%d] (master:%d)" % (columnNum,
columnNum // self.coincidencesShape[1],
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Logical units dealing with storage of instances."""
import itertools
import logging
import os
import time
from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import objects
from ganeti import utils
import ganeti.rpc.node as rpc
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, Tasklet
from ganeti.cmdlib.common import INSTANCE_DOWN, INSTANCE_NOT_RUNNING, \
AnnotateDiskParams, CheckIAllocatorOrNode, ExpandNodeUuidAndName, \
CheckNodeOnline, CheckInstanceNodeGroups, CheckInstanceState, \
IsExclusiveStorageEnabledNode, FindFaultyInstanceDisks, GetWantedNodes, \
CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_utils import GetInstanceInfoText, \
CopyLockList, ReleaseLocks, CheckNodeVmCapable, \
BuildInstanceHookEnvByObject, CheckNodeNotDrained, CheckTargetNodeIPolicy
import ganeti.masterd.instance
_DISK_TEMPLATE_NAME_PREFIX = {
constants.DT_PLAIN: "",
constants.DT_RBD: ".rbd",
constants.DT_EXT: ".ext",
constants.DT_FILE: ".file",
constants.DT_SHARED_FILE: ".sharedfile",
}
def CreateSingleBlockDev(lu, node_uuid, instance, device, info, force_open,
excl_stor):
"""Create a single block device on a given node.
This will not recurse over children of the device, so they must be
created in advance.
@param lu: the lu on whose behalf we execute
@param node_uuid: the node on which to create the device
@type instance: L{objects.Instance}
@param instance: the instance which owns the device
@type device: L{objects.Disk}
@param device: the device to create
@param info: the extra 'metadata' we should attach to the device
(this will be represented as a LVM tag)
@type force_open: boolean
@param force_open: this parameter will be passed to the
L{backend.BlockdevCreate} function where it specifies
whether we run on primary or not, and it affects both
the child assembly and the device own Open() execution
@type excl_stor: boolean
@param excl_stor: Whether exclusive_storage is active for the node
"""
result = lu.rpc.call_blockdev_create(node_uuid, (device, instance),
device.size, instance.name, force_open,
info, excl_stor)
result.Raise("Can't create block device %s on"
" node %s for instance %s" % (device,
lu.cfg.GetNodeName(node_uuid),
instance.name))
def _CreateBlockDevInner(lu, node_uuid, instance, device, force_create,
info, force_open, excl_stor):
"""Create a tree of block devices on a given node.
If this device type has to be created on secondaries, create it and
all its children.
If not, just recurse to children keeping the same 'force' value.
@attention: The device has to be annotated already.
@param lu: the lu on whose behalf we execute
@param node_uuid: the node on which to create the device
@type instance: L{objects.Instance}
@param instance: the instance which owns the device
@type device: L{objects.Disk}
@param device: the device to create
@type force_create: boolean
@param force_create: whether to force creation of this device; this
will be change to True whenever we find a device which has
CreateOnSecondary() attribute
@param info: the extra 'metadata' we should attach to the device
(this will be represented as a LVM tag)
@type force_open: boolean
@param force_open: this parameter will be passed to the
L{backend.BlockdevCreate} function where it specifies
whether we run on primary or not, and it affects both
the child assembly and the device own Open() execution
@type excl_stor: boolean
@param excl_stor: Whether exclusive_storage is active for the node
@return: list of created devices
"""
created_devices = []
try:
if device.CreateOnSecondary():
force_create = True
if device.children:
for child in device.children:
devs = _CreateBlockDevInner(lu, node_uuid, instance, child,
force_create, info, force_open, excl_stor)
created_devices.extend(devs)
if not force_create:
return created_devices
CreateSingleBlockDev(lu, node_uuid, instance, device, info, force_open,
excl_stor)
# The device has been completely created, so there is no point in keeping
# its subdevices in the list. We just add the device itself instead.
created_devices = [(node_uuid, device)]
return created_devices
except errors.DeviceCreationError, e:
e.created_devices.extend(created_devices)
raise e
except errors.OpExecError, e:
raise errors.DeviceCreationError(str(e), created_devices)
def IsExclusiveStorageEnabledNodeUuid(cfg, node_uuid):
"""Whether exclusive_storage is in effect for the given node.
@type cfg: L{config.ConfigWriter}
@param cfg: The cluster configuration
@type node_uuid: string
@param node_uuid: The node UUID
@rtype: bool
@return: The effective value of exclusive_storage
@raise errors.OpPrereqError: if no node exists with the given name
"""
ni = cfg.GetNodeInfo(node_uuid)
if ni is None:
raise errors.OpPrereqError("Invalid node UUID %s" % node_uuid,
errors.ECODE_NOENT)
return IsExclusiveStorageEnabledNode(cfg, ni)
def _CreateBlockDev(lu, node_uuid, instance, device, force_create, info,
force_open):
"""Wrapper around L{_CreateBlockDevInner}.
This method annotates the root device first.
"""
(disk,) = AnnotateDiskParams(instance, [device], lu.cfg)
excl_stor = IsExclusiveStorageEnabledNodeUuid(lu.cfg, node_uuid)
return _CreateBlockDevInner(lu, node_uuid, instance, disk, force_create, info,
force_open, excl_stor)
def _UndoCreateDisks(lu, disks_created, instance):
"""Undo the work performed by L{CreateDisks}.
This function is called in case of an error to undo the work of
L{CreateDisks}.
@type lu: L{LogicalUnit}
@param lu: the logical unit on whose behalf we execute
@param disks_created: the result returned by L{CreateDisks}
@type instance: L{objects.Instance}
@param instance: the instance for which disks were created
"""
for (node_uuid, disk) in disks_created:
result = lu.rpc.call_blockdev_remove(node_uuid, (disk, instance))
result.Warn("Failed to remove newly-created disk %s on node %s" %
(disk, lu.cfg.GetNodeName(node_uuid)), logging.warning)
def CreateDisks(lu, instance, disk_template=None,
to_skip=None, target_node_uuid=None, disks=None):
"""Create all disks for an instance.
This abstracts away some work from AddInstance.
Since the instance may not have been saved to the config file yet, this
function can not query the config file for the instance's disks; in that
case they need to be passed as an argument.
This function is also used by the disk template conversion mechanism to
create the new disks of the instance. Since the instance will have the
old template at the time we create the new disks, the new template must
be passed as an extra argument.
@type lu: L{LogicalUnit}
@param lu: the logical unit on whose behalf we execute
@type instance: L{objects.Instance}
@param instance: the instance whose disks we should create
@type disk_template: string
@param disk_template: if provided, overrides the instance's disk_template
@type to_skip: list
@param to_skip: list of indices to skip
@type target_node_uuid: string
@param target_node_uuid: if passed, overrides the target node for creation
@type disks: list of {objects.Disk}
@param disks: the disks to create; if not specified, all the disks of the
instance are created
@return: information about the created disks, to be used to call
L{_UndoCreateDisks}
@raise errors.OpPrereqError: in case of error
"""
info = GetInstanceInfoText(instance)
if disks is None:
disks = lu.cfg.GetInstanceDisks(instance.uuid)
if target_node_uuid is None:
pnode_uuid = instance.primary_node
# We cannot use config's 'GetInstanceNodes' here as 'CreateDisks'
# is used by 'LUInstanceCreate' and the instance object is not
# stored in the config yet.
all_node_uuids = []
for disk in disks:
all_node_uuids.extend(disk.all_nodes)
all_node_uuids = set(all_node_uuids)
# ensure that primary node is always the first
all_node_uuids.discard(pnode_uuid)
all_node_uuids = [pnode_uuid] + list(all_node_uuids)
else:
pnode_uuid = target_node_uuid
all_node_uuids = [pnode_uuid]
if disk_template is None:
disk_template = instance.disk_template
CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), disk_template)
if disk_template in constants.DTS_FILEBASED:
file_storage_dir = os.path.dirname(disks[0].logical_id[1])
result = lu.rpc.call_file_storage_dir_create(pnode_uuid, file_storage_dir)
result.Raise("Failed to create directory '%s' on"
" node %s" % (file_storage_dir,
lu.cfg.GetNodeName(pnode_uuid)))
disks_created = []
for idx, device in enumerate(disks):
if to_skip and idx in to_skip:
continue
logging.info("Creating disk %s for instance '%s'", idx, instance.name)
for node_uuid in all_node_uuids:
f_create = node_uuid == pnode_uuid
try:
_CreateBlockDev(lu, node_uuid, instance, device, f_create, info,
f_create)
disks_created.append((node_uuid, device))
except errors.DeviceCreationError, e:
logging.warning("Creating disk %s for instance '%s' failed",
idx, instance.name)
disks_created.extend(e.created_devices)
_UndoCreateDisks(lu, disks_created, instance)
raise errors.OpExecError(e.message)
return disks_created
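# A hedged usage sketch of how the return value above pairs with
# _UndoCreateDisks; the surrounding LU code here is assumed for illustration,
# not quoted from this module:
#
#   disks_created = CreateDisks(lu, instance)
#   try:
#     pass  # ... further instance setup ...
#   except errors.OpExecError:
#     _UndoCreateDisks(lu, disks_created, instance)
#     raise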
def ComputeDiskSizePerVG(disk_template, disks):
"""Compute disk size requirements in the volume group
"""
def _compute(disks, payload):
"""Universal algorithm.
"""
vgs = {}
for disk in disks:
vg_name = disk[constants.IDISK_VG]
vgs[vg_name] = \
vgs.get(vg_name, 0) + disk[constants.IDISK_SIZE] + payload
return vgs
# Required free disk space as a function of disk and swap space
req_size_dict = {
constants.DT_DISKLESS: {},
constants.DT_PLAIN: _compute(disks, 0),
# 128 MB are added for drbd metadata for each disk
constants.DT_DRBD8: _compute(disks, constants.DRBD_META_SIZE),
constants.DT_FILE: {},
constants.DT_SHARED_FILE: {},
constants.DT_GLUSTER: {},
}
if disk_template not in req_size_dict:
raise errors.ProgrammerError("Disk template '%s' size requirement"
" is unknown" % disk_template)
return req_size_dict[disk_template]
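# Illustrative numbers (assumed, not from the code): two disks of 10240 and
# 2048 MiB in volume group "xenvg" give
#   DT_PLAIN -> {"xenvg": 12288}
#   DT_DRBD8 -> {"xenvg": 12544}   # 128 MiB of DRBD metadata added per disk
# while the file-based and diskless templates require no VG space at all.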
def ComputeDisks(disks, disk_template, default_vg):
"""Computes the instance disks.
@type disks: list of dictionaries
@param disks: The disks' input dictionary
@type disk_template: string
@param disk_template: The disk template of the instance
@type default_vg: string
@param default_vg: The default_vg to assume
@return: The computed disks
"""
new_disks = []
for disk in disks:
mode = disk.get(constants.IDISK_MODE, constants.DISK_RDWR)
if mode not in constants.DISK_ACCESS_SET:
raise errors.OpPrereqError("Invalid disk access mode '%s'" %
mode, errors.ECODE_INVAL)
size = disk.get(constants.IDISK_SIZE, None)
if size is None:
raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
try:
size = int(size)
except (TypeError, ValueError):
raise errors.OpPrereqError("Invalid disk size '%s'" % size,
errors.ECODE_INVAL)
ext_provider = disk.get(constants.IDISK_PROVIDER, None)
if ext_provider and disk_template != constants.DT_EXT:
raise errors.OpPrereqError("The '%s' option is only valid for the %s"
" disk template, not %s" %
(constants.IDISK_PROVIDER, constants.DT_EXT,
disk_template), errors.ECODE_INVAL)
data_vg = disk.get(constants.IDISK_VG, default_vg)
name = disk.get(constants.IDISK_NAME, None)
if name is not None and name.lower() == constants.VALUE_NONE:
name = None
new_disk = {
constants.IDISK_SIZE: size,
constants.IDISK_MODE: mode,
constants.IDISK_VG: data_vg,
constants.IDISK_NAME: name,
}
for key in [
constants.IDISK_METAVG,
constants.IDISK_ADOPT,
constants.IDISK_SPINDLES,
]:
if key in disk:
new_disk[key] = disk[key]
# Add IDISK_ACCESS parameter for disk templates that support it
if (disk_template in constants.DTS_HAVE_ACCESS and
constants.IDISK_ACCESS in disk):
new_disk[constants.IDISK_ACCESS] = disk[constants.IDISK_ACCESS]
# For extstorage, demand the `provider' option and add any
# additional parameters (ext-params) to the dict
if disk_template == constants.DT_EXT:
if ext_provider:
new_disk[constants.IDISK_PROVIDER] = ext_provider
for key in disk:
if key not in constants.IDISK_PARAMS:
new_disk[key] = disk[key]
else:
raise errors.OpPrereqError("Missing provider for template '%s'" %
constants.DT_EXT, errors.ECODE_INVAL)
new_disks.append(new_disk)
return new_disks
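# Sketch of the transformation, with assumed input values: a request such as
#   [{constants.IDISK_SIZE: "10240", constants.IDISK_MODE: "rw"}]
# computed for constants.DT_PLAIN with default_vg="xenvg" comes back roughly as
#   [{constants.IDISK_SIZE: 10240, constants.IDISK_MODE: "rw",
#     constants.IDISK_VG: "xenvg", constants.IDISK_NAME: None}]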
def ComputeDisksInfo(disks, disk_template, default_vg, ext_params):
"""Computes the new instance's disks for the template conversion.
This method is used by the disks template conversion mechanism. Using the
'ComputeDisks' method as an auxiliary method computes the disks that will be
used for generating the new disk template of the instance. It computes the
size, mode, and name parameters from the instance's current disks, such as
the volume group and the access parameters for the templates that support
them. For conversions targeting an extstorage template, the mandatory
provider's name or any user-provided extstorage parameters will also be
included in the result.
@type disks: list of {objects.Disk}
@param disks: The current disks of the instance
@type disk_template: string
@param disk_template: The disk template of the instance
@type default_vg: string
@param default_vg: The default volume group to assume
@type ext_params: dict
@param ext_params: The extstorage parameters
@rtype: list of dictionaries
@return: The computed disks' information for the new template
"""
# Ensure 'ext_params' does not violate existing disks' params
for key in ext_params.keys():
if key != constants.IDISK_PROVIDER:
assert key not in constants.IDISK_PARAMS, \
"Invalid extstorage parameter '%s'" % key
# Prepare the disks argument for the 'ComputeDisks' method.
inst_disks = [dict((key, value) for key, value in disk.iteritems()
if key in constants.IDISK_PARAMS)
for disk in map(objects.Disk.ToDict, disks)]
# Update disks with the user-provided 'ext_params'.
for disk in inst_disks:
disk.update(ext_params)
# Compute the new disks' information.
new_disks = ComputeDisks(inst_disks, disk_template, default_vg)
# Add missing parameters to the previously computed disks.
for disk, new_disk in zip(disks, new_disks):
# Conversions between ExtStorage templates allowed only for different
# providers.
if (disk.dev_type == disk_template and
disk_template == constants.DT_EXT):
provider = new_disk[constants.IDISK_PROVIDER]
if provider == disk.params[constants.IDISK_PROVIDER]:
raise errors.OpPrereqError("Not converting, '%s' of type ExtStorage"
" already using provider '%s'" %
(disk.iv_name, provider), errors.ECODE_INVAL)
# Add IDISK_ACCESS parameter for conversions between disk templates that
# support it.
if (disk_template in constants.DTS_HAVE_ACCESS and
constants.IDISK_ACCESS in disk.params):
new_disk[constants.IDISK_ACCESS] = disk.params[constants.IDISK_ACCESS]
# For LVM-based conversions (plain <-> drbd) use the same volume group.
if disk_template in constants.DTS_LVM:
if disk.dev_type == constants.DT_PLAIN:
new_disk[constants.IDISK_VG] = disk.logical_id[0]
elif disk.dev_type == constants.DT_DRBD8:
new_disk[constants.IDISK_VG] = disk.children[0].logical_id[0]
return new_disks
def CalculateFileStorageDir(lu):
"""Calculate final instance file storage dir.
@type lu: L{LogicalUnit}
@param lu: the logical unit on whose behalf we execute
@rtype: string
@return: The file storage directory for the instance
"""
# file storage dir calculation/check
instance_file_storage_dir = None
if lu.op.disk_template in constants.DTS_FILEBASED:
# build the full file storage dir path
joinargs = []
cfg_storage = None
if lu.op.disk_template == constants.DT_FILE:
cfg_storage = lu.cfg.GetFileStorageDir()
elif lu.op.disk_template == constants.DT_SHARED_FILE:
cfg_storage = lu.cfg.GetSharedFileStorageDir()
elif lu.op.disk_template == constants.DT_GLUSTER:
cfg_storage = lu.cfg.GetGlusterStorageDir()
if not cfg_storage:
raise errors.OpPrereqError(
"Cluster file storage dir for {tpl} storage type not defined".format(
tpl=repr(lu.op.disk_template)
),
errors.ECODE_STATE
)
joinargs.append(cfg_storage)
if lu.op.file_storage_dir is not None:
joinargs.append(lu.op.file_storage_dir)
if lu.op.disk_template != constants.DT_GLUSTER:
joinargs.append(lu.op.instance_name)
if len(joinargs) > 1:
# pylint: disable=W0142
instance_file_storage_dir = utils.PathJoin(*joinargs)
else:
instance_file_storage_dir = joinargs[0]
return instance_file_storage_dir
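# For example (paths assumed for illustration): with disk_template=DT_FILE, a
# cluster file storage dir of "/srv/ganeti/file-storage",
# op.file_storage_dir="mydir" and op.instance_name="inst1.example.com", the
# result would be "/srv/ganeti/file-storage/mydir/inst1.example.com"; for
# DT_GLUSTER the instance name component is omitted.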
def CheckRADOSFreeSpace():
"""Compute disk size requirements inside the RADOS cluster.
"""
# For the RADOS cluster we assume there is always enough space.
pass
def _GenerateDRBD8Branch(lu, primary_uuid, secondary_uuid, size, vgnames, names,
iv_name, p_minor, s_minor):
"""Generate a drbd8 device complete with its children.
"""
assert len(vgnames) == len(names) == 2
port = lu.cfg.AllocatePort()
shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
dev_data = objects.Disk(dev_type=constants.DT_PLAIN, size=size,
logical_id=(vgnames[0], names[0]),
params={})
dev_data.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
dev_meta = objects.Disk(dev_type=constants.DT_PLAIN,
size=constants.DRBD_META_SIZE,
logical_id=(vgnames[1], names[1]),
params={})
dev_meta.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
drbd_dev = objects.Disk(dev_type=constants.DT_DRBD8, size=size,
logical_id=(primary_uuid, secondary_uuid, port,
p_minor, s_minor,
shared_secret),
children=[dev_data, dev_meta],
iv_name=iv_name, params={})
drbd_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
return drbd_dev
def GenerateDiskTemplate(
lu, template_name, instance_uuid, primary_node_uuid, secondary_node_uuids,
disk_info, file_storage_dir, file_driver, base_index,
feedback_fn, full_disk_params):
"""Generate the entire disk layout for a given template type.
"""
vgname = lu.cfg.GetVGName()
disk_count = len(disk_info)
disks = []
CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), template_name)
if template_name == constants.DT_DISKLESS:
pass
elif template_name == constants.DT_DRBD8:
if len(secondary_node_uuids) != 1:
raise errors.ProgrammerError("Wrong template configuration")
remote_node_uuid = secondary_node_uuids[0]
minors = lu.cfg.AllocateDRBDMinor(
[primary_node_uuid, remote_node_uuid] * len(disk_info), instance_uuid)
(drbd_params, _, _) = objects.Disk.ComputeLDParams(template_name,
full_disk_params)
drbd_default_metavg = drbd_params[constants.LDP_DEFAULT_METAVG]
names = []
for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
for i in range(disk_count)]):
names.append(lv_prefix + "_data")
names.append(lv_prefix + "_meta")
for idx, disk in enumerate(disk_info):
disk_index = idx + base_index
data_vg = disk.get(constants.IDISK_VG, vgname)
meta_vg = disk.get(constants.IDISK_METAVG, drbd_default_metavg)
disk_dev = _GenerateDRBD8Branch(lu, primary_node_uuid, remote_node_uuid,
disk[constants.IDISK_SIZE],
[data_vg, meta_vg],
names[idx * 2:idx * 2 + 2],
"disk/%d" % disk_index,
minors[idx * 2], minors[idx * 2 + 1])
disk_dev.mode = disk[constants.IDISK_MODE]
disk_dev.name = disk.get(constants.IDISK_NAME, None)
disks.append(disk_dev)
else:
if secondary_node_uuids:
raise errors.ProgrammerError("Wrong template configuration")
name_prefix = _DISK_TEMPLATE_NAME_PREFIX.get(template_name, None)
if name_prefix is None:
names = None
else:
names = _GenerateUniqueNames(lu, ["%s.disk%s" %
(name_prefix, base_index + i)
for i in range(disk_count)])
if template_name == constants.DT_PLAIN:
def logical_id_fn(idx, _, disk):
vg = disk.get(constants.IDISK_VG, vgname)
return (vg, names[idx])
elif template_name == constants.DT_GLUSTER:
logical_id_fn = lambda _1, disk_index, _2: \
(file_driver, "ganeti/%s.%d" % (instance_uuid,
disk_index))
elif template_name in constants.DTS_FILEBASED: # Gluster handled above
logical_id_fn = \
lambda _, disk_index, disk: (file_driver,
"%s/%s" % (file_storage_dir,
names[idx]))
elif template_name == constants.DT_BLOCK:
logical_id_fn = \
lambda idx, disk_index, disk: (constants.BLOCKDEV_DRIVER_MANUAL,
disk[constants.IDISK_ADOPT])
elif template_name == constants.DT_RBD:
logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
elif template_name == constants.DT_EXT:
def logical_id_fn(idx, _, disk):
provider = disk.get(constants.IDISK_PROVIDER, None)
if provider is None:
raise errors.ProgrammerError("Disk template is %s, but '%s' is"
" not found", constants.DT_EXT,
constants.IDISK_PROVIDER)
return (provider, names[idx])
else:
raise errors.ProgrammerError("Unknown disk template '%s'" % template_name)
dev_type = template_name
for idx, disk in enumerate(disk_info):
params = {}
# Only for the Ext template add disk_info to params
if template_name == constants.DT_EXT:
params[constants.IDISK_PROVIDER] = disk[constants.IDISK_PROVIDER]
for key in disk:
if key not in constants.IDISK_PARAMS:
params[key] = disk[key]
# Add IDISK_ACCESS param to disk params
if (template_name in constants.DTS_HAVE_ACCESS and
constants.IDISK_ACCESS in disk):
params[constants.IDISK_ACCESS] = disk[constants.IDISK_ACCESS]
disk_index = idx + base_index
size = disk[constants.IDISK_SIZE]
feedback_fn("* disk %s, size %s" %
(disk_index, utils.FormatUnit(size, "h")))
disk_dev = objects.Disk(dev_type=dev_type, size=size,
logical_id=logical_id_fn(idx, disk_index, disk),
iv_name="disk/%d" % disk_index,
mode=disk[constants.IDISK_MODE],
params=params,
spindles=disk.get(constants.IDISK_SPINDLES))
disk_dev.name = disk.get(constants.IDISK_NAME, None)
disk_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
disks.append(disk_dev)
return disks
def CheckSpindlesExclusiveStorage(diskdict, es_flag, required):
"""Check the presence of the spindle options with exclusive_storage.
@type diskdict: dict
@param diskdict: disk parameters
@type es_flag: bool
@param es_flag: the effective value of the exclusive_storage flag
@type required: bool
@param required: whether spindles are required or just optional
@raise errors.OpPrereqError: when spindles are given and they should not be
"""
if (not es_flag and constants.IDISK_SPINDLES in diskdict and
diskdict[constants.IDISK_SPINDLES] is not None):
raise errors.OpPrereqError("Spindles in instance disks cannot be specified"
" when exclusive storage is not active",
errors.ECODE_INVAL)
if (es_flag and required and (constants.IDISK_SPINDLES not in diskdict or
diskdict[constants.IDISK_SPINDLES] is None)):
raise errors.OpPrereqError("You must specify spindles in instance disks"
" when exclusive storage is active",
errors.ECODE_INVAL)
class LUInstanceRecreateDisks(LogicalUnit):
"""Recreate an instance's missing disks.
"""
HPATH = "instance-recreate-disks"
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
_MODIFYABLE = compat.UniqueFrozenset([
constants.IDISK_SIZE,
constants.IDISK_MODE,
constants.IDISK_SPINDLES,
])
# New or changed disk parameters may have different semantics
assert constants.IDISK_PARAMS == (_MODIFYABLE | frozenset([
constants.IDISK_ADOPT,
# TODO: Implement support for changing VG while recreating
constants.IDISK_VG,
constants.IDISK_METAVG,
constants.IDISK_PROVIDER,
constants.IDISK_NAME,
constants.IDISK_ACCESS,
]))
def _RunAllocator(self):
"""Run the allocator based on input opcode.
"""
be_full = self.cfg.GetClusterInfo().FillBE(self.instance)
# FIXME
# The allocator should actually run in "relocate" mode, but current
# allocators don't support relocating all the nodes of an instance at
# the same time. As a workaround we use "allocate" mode, but this is
# suboptimal for two reasons:
# - The instance name passed to the allocator is present in the list of
# existing instances, so there could be a conflict within the
# internal structures of the allocator. This doesn't happen with the
# current allocators, but it's a liability.
# - The allocator counts the resources used by the instance twice: once
# because the instance exists already, and once because it tries to
# allocate a new instance.
# The allocator could choose some of the nodes on which the instance is
# running, but that's not a problem. If the instance nodes are broken,
# they should already be marked as drained or offline, and hence
# skipped by the allocator. If instance disks have been lost for other
# reasons, then recreating the disks on the same nodes should be fine.
disk_template = self.instance.disk_template
spindle_use = be_full[constants.BE_SPINDLE_USE]
disks = [{
constants.IDISK_SIZE: d.size,
constants.IDISK_MODE: d.mode,
constants.IDISK_SPINDLES: d.spindles,
} for d in self.cfg.GetInstanceDisks(self.instance.uuid)]
req = iallocator.IAReqInstanceAlloc(name=self.op.instance_name,
disk_template=disk_template,
group_name=None,
tags=list(self.instance.GetTags()),
os=self.instance.os,
nics=[{}],
vcpus=be_full[constants.BE_VCPUS],
memory=be_full[constants.BE_MAXMEM],
spindle_use=spindle_use,
disks=disks,
hypervisor=self.instance.hypervisor,
node_whitelist=None)
ial = iallocator.IAllocator(self.cfg, self.rpc, req)
ial.Run(self.op.iallocator)
assert req.RequiredNodes() == \
len(self.cfg.GetInstanceNodes(self.instance.uuid))
if not ial.success:
raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
" %s" % (self.op.iallocator, ial.info),
errors.ECODE_NORES)
(self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, ial.result)
self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
self.op.instance_name, self.op.iallocator,
utils.CommaJoin(self.op.nodes))
def CheckArguments(self):
if self.op.disks and ht.TNonNegativeInt(self.op.disks[0]):
# Normalize and convert deprecated list of disk indices
self.op.disks = [(idx, {}) for idx in sorted(frozenset(self.op.disks))]
duplicates = utils.FindDuplicates(map(compat.fst, self.op.disks))
if duplicates:
raise errors.OpPrereqError("Some disks have been specified more than"
" once: %s" % utils.CommaJoin(duplicates),
errors.ECODE_INVAL)
# We don't want _CheckIAllocatorOrNode selecting the default iallocator
# when neither iallocator nor nodes are specified
if self.op.iallocator or self.op.nodes:
CheckIAllocatorOrNode(self, "iallocator", "nodes")
for (idx, params) in self.op.disks:
utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
unsupported = frozenset(params.keys()) - self._MODIFYABLE
if unsupported:
raise errors.OpPrereqError("Parameters for disk %s try to change"
" unmodifyable parameter(s): %s" %
(idx, utils.CommaJoin(unsupported)),
errors.ECODE_INVAL)
def ExpandNames(self):
self._ExpandAndLockInstance()
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
if self.op.nodes:
(self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, self.op.nodes)
self.needed_locks[locking.LEVEL_NODE] = list(self.op.node_uuids)
else:
self.needed_locks[locking.LEVEL_NODE] = []
if self.op.iallocator:
# iallocator will select a new node in the same group
self.needed_locks[locking.LEVEL_NODEGROUP] = []
self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
self.needed_locks[locking.LEVEL_NODE_RES] = []
self.dont_collate_locks[locking.LEVEL_NODEGROUP] = True
self.dont_collate_locks[locking.LEVEL_NODE] = True
self.dont_collate_locks[locking.LEVEL_NODE_RES] = True
def DeclareLocks(self, level):
if level == locking.LEVEL_NODEGROUP:
assert self.op.iallocator is not None
assert not self.op.nodes
assert not self.needed_locks[locking.LEVEL_NODEGROUP]
self.share_locks[locking.LEVEL_NODEGROUP] = 1
# Lock the primary group used by the instance optimistically; this
# requires going via the node before it's locked, requiring
# verification later on
self.needed_locks[locking.LEVEL_NODEGROUP] = \
self.cfg.GetInstanceNodeGroups(self.op.instance_uuid, primary_only=True)
elif level == locking.LEVEL_NODE:
# If an allocator is used, then we lock all the nodes in the current
# instance group, as we don't know yet which ones will be selected;
# if we replace the nodes without using an allocator, locks are
# already declared in ExpandNames; otherwise, we need to lock all the
# instance nodes for disk re-creation
if self.op.iallocator:
assert not self.op.nodes
assert not self.needed_locks[locking.LEVEL_NODE]
assert len(self.owned_locks(locking.LEVEL_NODEGROUP)) == 1
# Lock member nodes of the group of the primary node
for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP):
self.needed_locks[locking.LEVEL_NODE].extend(
self.cfg.GetNodeGroup(group_uuid).members)
assert locking.NAL in self.owned_locks(locking.LEVEL_NODE_ALLOC)
elif not self.op.nodes:
self._LockInstancesNodes(primary_only=False)
elif level == locking.LEVEL_NODE_RES:
# Copy node locks
self.needed_locks[locking.LEVEL_NODE_RES] = \
CopyLockList(self.needed_locks[locking.LEVEL_NODE])
def BuildHooksEnv(self):
"""Build hooks env.
This runs on master, primary and secondary nodes of the instance.
"""
return BuildInstanceHookEnvByObject(self, self.instance)
def BuildHooksNodes(self):
"""Build hooks nodes.
"""
nl = [self.cfg.GetMasterNode()] + \
list(self.cfg.GetInstanceNodes(self.instance.uuid))
return (nl, nl)
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster and is not running.
"""
instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
assert instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
if self.op.node_uuids:
inst_nodes = self.cfg.GetInstanceNodes(instance.uuid)
if len(self.op.node_uuids) != len(inst_nodes):
raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
" %d replacement nodes were specified" %
(instance.name, len(inst_nodes),
len(self.op.node_uuids)),
errors.ECODE_INVAL)
assert instance.disk_template != constants.DT_DRBD8 or \
len(self.op.node_uuids) == 2
assert instance.disk_template != constants.DT_PLAIN or \
len(self.op.node_uuids) == 1
primary_node = self.op.node_uuids[0]
else:
primary_node = instance.primary_node
if not self.op.iallocator:
CheckNodeOnline(self, primary_node)
if instance.disk_template == constants.DT_DISKLESS:
raise errors.OpPrereqError("Instance '%s' has no disks" %
self.op.instance_name, errors.ECODE_INVAL)
# Verify if node group locks are still correct
owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
if owned_groups:
# Node group locks are acquired only for the primary node (and only
# when the allocator is used)
CheckInstanceNodeGroups(self.cfg, instance.uuid, owned_groups,
primary_only=True)
# if we replace nodes *and* the old primary is offline, we don't
# check the instance state
old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
if not ((self.op.iallocator or self.op.node_uuids) and old_pnode.offline):
CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
msg="cannot recreate disks")
if self.op.disks:
self.disks = dict(self.op.disks)
else:
self.disks = dict((idx, {}) for idx in range(len(instance.disks)))
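# Any index beyond the instance's current number of disks is invalid;
# checking the highest requested index is sufficient.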
maxidx = max(self.disks.keys())
if maxidx >= len(instance.disks):
raise errors.OpPrereqError("Invalid disk index '%s'" % maxidx,
errors.ECODE_INVAL)
if ((self.op.node_uuids or self.op.iallocator) and
sorted(self.disks.keys()) != range(len(instance.disks))):
raise errors.OpPrereqError("Can't recreate disks partially and"
" change the nodes at the same time",
errors.ECODE_INVAL)
self.instance = instance
if self.op.iallocator:
self._RunAllocator()
# Release unneeded node and node resource locks
ReleaseLocks(self, locking.LEVEL_NODE, keep=self.op.node_uuids)
ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=self.op.node_uuids)
ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
if self.op.node_uuids:
node_uuids = self.op.node_uuids
else:
node_uuids = self.cfg.GetInstanceNodes(instance.uuid)
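# Exclusive storage is in effect if any of the target nodes enables it;
# the spindles setting of each recreated disk is validated against it below.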
excl_stor = compat.any(
rpc.GetExclusiveStorageForNodes(self.cfg, node_uuids).values()
)
for new_params in self.disks.values():
CheckSpindlesExclusiveStorage(new_params, excl_stor, False)
def Exec(self, feedback_fn):
"""Recreate the disks.
"""
assert (self.owned_locks(locking.LEVEL_NODE) ==
self.owned_locks(locking.LEVEL_NODE_RES))
to_skip = []
mods = [] # keeps track of needed changes
inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
for idx, disk in enumerate(inst_disks):
try:
changes = self.disks[idx]
except KeyError:
# Disk should not be recreated
to_skip.append(idx)
continue
# update secondaries for disks, if needed
if self.op.node_uuids and disk.dev_type == constants.DT_DRBD8:
# need to update the nodes and minors
assert len(self.op.node_uuids) == 2
assert len(disk.logical_id) == 6 # otherwise disk internals
# have changed
(_, _, old_port, _, _, old_secret) = disk.logical_id
new_minors = self.cfg.AllocateDRBDMinor(self.op.node_uuids,
self.instance.uuid)
new_id = (self.op.node_uuids[0], self.op.node_uuids[1], old_port,
new_minors[0], new_minors[1], old_secret)
assert len(disk.logical_id) == len(new_id)
else:
new_id = None
mods.append((idx, new_id, changes))
# now that we have passed all asserts above, we can apply the mods
# in a single run (to avoid partial changes)
for idx, new_id, changes in mods:
disk = inst_disks[idx]
if new_id is not None:
assert disk.dev_type == constants.DT_DRBD8
disk.logical_id = new_id
if changes:
disk.Update(size=changes.get(constants.IDISK_SIZE, None),
mode=changes.get(constants.IDISK_MODE, None),
spindles=changes.get(constants.IDISK_SPINDLES, None))
self.cfg.Update(disk, feedback_fn)
# change primary node, if needed
if self.op.node_uuids:
self.instance.primary_node = self.op.node_uuids[0]
self.LogWarning("Changing the instance's nodes, you will have to"
" remove any disks left on the older nodes manually")
if self.op.node_uuids:
self.cfg.Update(self.instance, feedback_fn)
# All touched nodes must be locked
mylocks = self.owned_locks(locking.LEVEL_NODE)
inst_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)
assert mylocks.issuperset(frozenset(inst_nodes))
new_disks = CreateDisks(self, self.instance, to_skip=to_skip)
# TODO: Release node locks before wiping, or explain why it's not possible
inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
if self.cfg.GetClusterInfo().prealloc_wipe_disks:
wipedisks = [(idx, disk, 0)
for (idx, disk) in enumerate(inst_disks)
if idx not in to_skip]
WipeOrCleanupDisks(self, self.instance, disks=wipedisks,
cleanup=new_disks)
def _PerformNodeInfoCall(lu, node_uuids, vg):
"""Prepares the input and performs a node info call.
@type lu: C{LogicalUnit}
@param lu: a logical unit from which we get configuration data
@type node_uuids: list of string
@param node_uuids: list of node UUIDs to perform the call for
@type vg: string
@param vg: the volume group's name
"""
lvm_storage_units = [(constants.ST_LVM_VG, vg)]
storage_units = rpc.PrepareStorageUnitsForNodes(lu.cfg, lvm_storage_units,
node_uuids)
hvname = lu.cfg.GetHypervisorType()
hvparams = lu.cfg.GetClusterInfo().hvparams
nodeinfo = lu.rpc.call_node_info(node_uuids, storage_units,
[(hvname, hvparams[hvname])])
return nodeinfo
def _CheckVgCapacityForNode(node_name, node_info, vg, requested):
"""Checks the vg capacity for a given node.
@type node_info: tuple (_, list of dicts, _)
@param node_info: the result of the node info call for one node
@type node_name: string
@param node_name: the name of the node
@type vg: string
@param vg: volume group name
@type requested: int
@param requested: the amount of disk in MiB to check for
@raise errors.OpPrereqError: if the node doesn't have enough disk,
or we cannot check the node
"""
(_, space_info, _) = node_info
lvm_vg_info = utils.storage.LookupSpaceInfoByStorageType(
space_info, constants.ST_LVM_VG)
if not lvm_vg_info:
raise errors.OpPrereqError("Can't retrieve storage information for LVM")
vg_free = lvm_vg_info.get("storage_free", None)
if not isinstance(vg_free, int):
raise errors.OpPrereqError("Can't compute free disk space on node"
" %s for vg %s, result was '%s'" %
(node_name, vg, vg_free), errors.ECODE_ENVIRON)
if requested > vg_free:
raise errors.OpPrereqError("Not enough disk space on target node %s"
" vg %s: required %d MiB, available %d MiB" %
(node_name, vg, requested, vg_free),
errors.ECODE_NORES)
def _CheckNodesFreeDiskOnVG(lu, node_uuids, vg, requested):
"""Checks if nodes have enough free disk space in the specified VG.
This function checks if all given nodes have the needed amount of
free disk. In case any node has less disk or we cannot get the
information from the node, this function raises an OpPrereqError
exception.
@type lu: C{LogicalUnit}
@param lu: a logical unit from which we get configuration data
@type node_uuids: C{list}
@param node_uuids: the list of node UUIDs to check
@type vg: C{str}
@param vg: the volume group to check
@type requested: C{int}
@param requested: the amount of disk in MiB to check for
@raise errors.OpPrereqError: if the node doesn't have enough disk,
or we cannot check the node
"""
nodeinfo = _PerformNodeInfoCall(lu, node_uuids, vg)
for node_uuid in node_uuids:
node_name = lu.cfg.GetNodeName(node_uuid)
info = nodeinfo[node_uuid]
info.Raise("Cannot get current information from node %s" % node_name,
prereq=True, ecode=errors.ECODE_ENVIRON)
_CheckVgCapacityForNode(node_name, info.payload, vg, requested)
def CheckNodesFreeDiskPerVG(lu, node_uuids, req_sizes):
"""Checks if nodes have enough free disk space in all the VGs.
This function checks if all given nodes have the needed amount of
free disk. In case any node has less disk or we cannot get the
information from the node, this function raises an OpPrereqError
exception.
@type lu: C{LogicalUnit}
@param lu: a logical unit from which we get configuration data
@type node_uuids: C{list}
@param node_uuids: the list of node UUIDs to check
@type req_sizes: C{dict}
@param req_sizes: the hash of vg and corresponding amount of disk in
MiB to check for
@raise errors.OpPrereqError: if the node doesn't have enough disk,
or we cannot check the node
"""
for vg, req_size in req_sizes.items():
_CheckNodesFreeDiskOnVG(lu, node_uuids, vg, req_size)
def _DiskSizeInBytesToMebibytes(lu, size):
"""Converts a disk size in bytes to mebibytes.
Warns and rounds up if the size isn't an even multiple of 1 MiB.
"""
(mib, remainder) = divmod(size, 1024 * 1024)
if remainder != 0:
lu.LogWarning("Disk size is not an even multiple of 1 MiB; rounding up"
" to not overwrite existing data (%s bytes will not be"
" wiped)", (1024 * 1024) - remainder)
mib += 1
return mib
def _CalcEta(time_taken, written, total_size):
"""Calculates the ETA based on size written and total size.
@param time_taken: The time taken so far
@param written: amount written so far
@param total_size: The total size of data to be written
@return: The remaining time in seconds
"""
avg_time = time_taken / float(written)
return (total_size - written) * avg_time
def WipeDisks(lu, instance, disks=None):
"""Wipes instance disks.
@type lu: L{LogicalUnit}
@param lu: the logical unit on whose behalf we execute
@type instance: L{objects.Instance}
@param instance: the instance whose disks we should create
@type disks: None or list of tuple of (number, L{objects.Disk}, number)
@param disks: Disk details; tuple contains disk index, disk object and the
start offset
"""
node_uuid = instance.primary_node
node_name = lu.cfg.GetNodeName(node_uuid)
if disks is None:
inst_disks = lu.cfg.GetInstanceDisks(instance.uuid)
disks = [(idx, disk, 0)
for (idx, disk) in enumerate(inst_disks)]
logging.info("Pausing synchronization of disks of instance '%s'",
instance.name)
result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
(map(compat.snd, disks),
instance),
True)
result.Raise("Failed to pause disk synchronization on node '%s'" % node_name)
for idx, success in enumerate(result.payload):
if not success:
logging.warn("Pausing synchronization of disk %s of instance '%s'"
" failed", idx, instance.name)
try:
for (idx, device, offset) in disks:
# The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
# MAX_WIPE_CHUNK at max. Truncating to integer to avoid rounding errors.
wipe_chunk_size = \
int(min(constants.MAX_WIPE_CHUNK,
device.size / 100.0 * constants.MIN_WIPE_CHUNK_PERCENT))
size = device.size
last_output = 0
start_time = time.time()
if offset == 0:
info_text = ""
else:
info_text = (" (from %s to %s)" %
(utils.FormatUnit(offset, "h"),
utils.FormatUnit(size, "h")))
lu.LogInfo("* Wiping disk %s%s", idx, info_text)
logging.info("Wiping disk %d for instance %s on node %s using"
" chunk size %s", idx, instance.name, node_name,
wipe_chunk_size)
while offset < size:
wipe_size = min(wipe_chunk_size, size - offset)
logging.debug("Wiping disk %d, offset %s, chunk %s",
idx, offset, wipe_size)
result = lu.rpc.call_blockdev_wipe(node_uuid, (device, instance),
offset, wipe_size)
result.Raise("Could not wipe disk %d at offset %d for size %d" %
(idx, offset, wipe_size))
now = time.time()
offset += wipe_size
if now - last_output >= 60:
eta = _CalcEta(now - start_time, offset, size)
lu.LogInfo(" - done: %.1f%% ETA: %s",
offset / float(size) * 100, utils.FormatSeconds(eta))
last_output = now
finally:
logging.info("Resuming synchronization of disks for instance '%s'",
instance.name)
result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
(map(compat.snd, disks),
instance),
False)
if result.fail_msg:
lu.LogWarning("Failed to resume disk synchronization on node '%s': %s",
node_name, result.fail_msg)
else:
for idx, success in enumerate(result.payload):
if not success:
lu.LogWarning("Resuming synchronization of disk %s of instance '%s'"
" failed", idx, instance.name)
def ImageDisks(lu, instance, image, disks=None):
"""Dumps an image onto an instance disk.
@type lu: L{LogicalUnit}
@param lu: the logical unit on whose behalf we execute
@type instance: L{objects.Instance}
@param instance: the instance whose disks we should create
@type image: string
@param image: the image whose disks we should create
@type disks: None or list of ints
@param disks: disk indices
"""
node_uuid = instance.primary_node
node_name = lu.cfg.GetNodeName(node_uuid)
inst_disks = lu.cfg.GetInstanceDisks(instance.uuid)
if disks is None:
disks = [(0, inst_disks[0])]
else:
disks = map(lambda idx: (idx, inst_disks[idx]), disks)
logging.info("Pausing synchronization of disks of instance '%s'",
instance.name)
result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
(map(compat.snd, disks),
instance),
True)
result.Raise("Failed to pause disk synchronization on node '%s'" % node_name)
for idx, success in enumerate(result.payload):
if not success:
logging.warn("Pausing synchronization of disk %s of instance '%s'"
" failed", idx, instance.name)
try:
for (idx, device) in disks:
lu.LogInfo("Imaging disk '%d' for instance '%s' on node '%s'",
idx, instance.name, node_name)
result = lu.rpc.call_blockdev_image(node_uuid, (device, instance),
image, device.size)
result.Raise("Could not image disk '%d' for instance '%s' on node '%s'" %
(idx, instance.name, node_name))
finally:
logging.info("Resuming synchronization of disks for instance '%s'",
instance.name)
result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
(map(compat.snd, disks),
instance),
False)
if result.fail_msg:
lu.LogWarning("Failed to resume disk synchronization for instance '%s' on"
" node '%s'", node_name, result.fail_msg)
else:
for idx, success in enumerate(result.payload):
if not success:
lu.LogWarning("Failed to resume synchronization of disk '%d' of"
" instance '%s'", idx, instance.name)
def WipeOrCleanupDisks(lu, instance, disks=None, cleanup=None):
"""Wrapper for L{WipeDisks} that handles errors.
@type lu: L{LogicalUnit}
@param lu: the logical unit on whose behalf we execute
@type instance: L{objects.Instance}
@param instance: the instance whose disks we should wipe
@param disks: see L{WipeDisks}
@param cleanup: the result returned by L{CreateDisks}, used for cleanup in
case of error
@raise errors.OpPrereqError: in case of failure
"""
try:
WipeDisks(lu, instance, disks=disks)
except errors.OpExecError:
logging.warning("Wiping disks for instance '%s' failed",
instance.name)
_UndoCreateDisks(lu, cleanup, instance)
raise
def ExpandCheckDisks(instance_disks, disks):
"""Return the instance disks selected by the disks list
@type disks: list of L{objects.Disk} or None
@param disks: selected disks
@rtype: list of L{objects.Disk}
@return: selected instance disks to act on
"""
if disks is None:
return instance_disks
else:
inst_disks_uuids = [d.uuid for d in instance_disks]
disks_uuids = [d.uuid for d in disks]
if not set(disks_uuids).issubset(inst_disks_uuids):
raise errors.ProgrammerError("Can only act on disks belonging to the"
" target instance: expected a subset of %s,"
" got %s" % (inst_disks_uuids, disks_uuids))
return disks
def WaitForSync(lu, instance, disks=None, oneshot=False):
"""Sleep and poll for an instance's disk to sync.
"""
inst_disks = lu.cfg.GetInstanceDisks(instance.uuid)
if not inst_disks or (disks is not None and not disks):
return True
disks = ExpandCheckDisks(inst_disks, disks)
if not oneshot:
lu.LogInfo("Waiting for instance %s to sync disks", instance.name)
node_uuid = instance.primary_node
node_name = lu.cfg.GetNodeName(node_uuid)
# TODO: Convert to utils.Retry
retries = 0
degr_retries = 10 # in seconds, as we sleep 1 second each time
while True:
max_time = 0
done = True
cumul_degraded = False
rstats = lu.rpc.call_blockdev_getmirrorstatus(node_uuid, (disks, instance))
msg = rstats.fail_msg
if msg:
lu.LogWarning("Can't get any data from node %s: %s", node_name, msg)
retries += 1
if retries >= 10:
raise errors.RemoteError("Can't contact node %s for mirror data,"
" aborting." % node_name)
time.sleep(6)
continue
rstats = rstats.payload
retries = 0
for i, mstat in enumerate(rstats):
if mstat is None:
lu.LogWarning("Can't compute data for node %s/%s",
node_name, disks[i].iv_name)
continue
cumul_degraded = (cumul_degraded or
(mstat.is_degraded and mstat.sync_percent is None))
if mstat.sync_percent is not None:
done = False
if mstat.estimated_time is not None:
rem_time = ("%s remaining (estimated)" %
utils.FormatSeconds(mstat.estimated_time))
max_time = mstat.estimated_time
else:
rem_time = "no time estimate"
max_time = 5 # sleep at least a bit between retries
lu.LogInfo("- device %s: %5.2f%% done, %s",
disks[i].iv_name, mstat.sync_percent, rem_time)
# if we're done but degraded, let's do a few small retries, to
# make sure we see a stable and not transient situation; therefore
# we force restart of the loop
if (done or oneshot) and cumul_degraded and degr_retries > 0:
logging.info("Degraded disks found, %d retries left", degr_retries)
degr_retries -= 1
time.sleep(1)
continue
if done or oneshot:
break
time.sleep(min(60, max_time))
if done:
lu.LogInfo("Instance %s's disks are in sync", instance.name)
return not cumul_degraded
def ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
"""Shutdown block devices of an instance.
This does the shutdown on all nodes of the instance.
Errors on the primary node are ignored only if ignore_primary is true;
errors on other nodes are ignored when the node in question is offline.
Modifies the configuration of the instance, so the caller should re-read the
instance configuration, if needed.
"""
all_result = True
if disks is None:
# only mark instance disks as inactive if all disks are affected
lu.cfg.MarkInstanceDisksInactive(instance.uuid)
inst_disks = lu.cfg.GetInstanceDisks(instance.uuid)
disks = ExpandCheckDisks(inst_disks, disks)
for disk in disks:
for node_uuid, top_disk in disk.ComputeNodeTree(instance.primary_node):
result = lu.rpc.call_blockdev_shutdown(node_uuid, (top_disk, instance))
msg = result.fail_msg
if msg:
lu.LogWarning("Could not shutdown block device %s on node %s: %s",
disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
if ((node_uuid == instance.primary_node and not ignore_primary) or
(node_uuid != instance.primary_node and not result.offline)):
all_result = False
return all_result
def _SafeShutdownInstanceDisks(lu, instance, disks=None):
"""Shutdown block devices of an instance.
This function checks if an instance is running, before calling
_ShutdownInstanceDisks.
"""
CheckInstanceState(lu, instance, INSTANCE_DOWN, msg="cannot shutdown disks")
ShutdownInstanceDisks(lu, instance, disks=disks)
def AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
ignore_size=False):
"""Prepare the block devices for an instance.
This sets up the block devices on all nodes.
Modifies the configuration of the instance, so the caller should re-read the
instance configuration, if needed.
@type lu: L{LogicalUnit}
@param lu: the logical unit on whose behalf we execute
@type instance: L{objects.Instance}
@param instance: the instance for whose disks we assemble
@type disks: list of L{objects.Disk} or None
@param disks: which disks to assemble (or all, if None)
@type ignore_secondaries: boolean
@param ignore_secondaries: if true, errors on secondary nodes
won't result in an error return from the function
@type ignore_size: boolean
@param ignore_size: if true, the current known size of the disk
will not be used during the disk activation, useful for cases
when the size is wrong
@return: False if the operation failed, otherwise a list of
(host, instance_visible_name, node_visible_name)
with the mapping from node devices to instance devices
"""
device_info = []
disks_ok = True
if disks is None:
# only mark instance disks as active if all disks are affected
instance = lu.cfg.MarkInstanceDisksActive(instance.uuid)
inst_disks = lu.cfg.GetInstanceDisks(instance.uuid)
disks = ExpandCheckDisks(inst_disks, disks)
# With the two passes mechanism we try to reduce the window of
# opportunity for the race condition of switching DRBD to primary
# before handshaking occurred, but we do not eliminate it
# The proper fix would be to wait (with some limits) until the
# connection has been made and drbd transitions from WFConnection
# into any other network-connected state (Connected, SyncTarget,
# SyncSource, etc.)
# 1st pass, assemble on all nodes in secondary mode
for idx, inst_disk in enumerate(disks):
for node_uuid, node_disk in inst_disk.ComputeNodeTree(
instance.primary_node):
if ignore_size:
node_disk = node_disk.Copy()
node_disk.UnsetSize()
result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
instance, False, idx)
msg = result.fail_msg
if msg:
secondary_nodes = lu.cfg.GetInstanceSecondaryNodes(instance.uuid)
is_offline_secondary = (node_uuid in secondary_nodes and
result.offline)
lu.LogWarning("Could not prepare block device %s on node %s"
" (is_primary=False, pass=1): %s",
inst_disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
if not (ignore_secondaries or is_offline_secondary):
disks_ok = False
# FIXME: race condition on drbd migration to primary
# 2nd pass, do only the primary node
for idx, inst_disk in enumerate(disks):
dev_path = None
for node_uuid, node_disk in inst_disk.ComputeNodeTree(
instance.primary_node):
if node_uuid != instance.primary_node:
continue
if ignore_size:
node_disk = node_disk.Copy()
node_disk.UnsetSize()
result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
instance, True, idx)
msg = result.fail_msg
if msg:
lu.LogWarning("Could not prepare block device %s on node %s"
" (is_primary=True, pass=2): %s",
inst_disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
disks_ok = False
else:
dev_path, _, __ = result.payload
device_info.append((lu.cfg.GetNodeName(instance.primary_node),
inst_disk.iv_name, dev_path))
if not disks_ok:
lu.cfg.MarkInstanceDisksInactive(instance.uuid)
return disks_ok, device_info
def StartInstanceDisks(lu, instance, force):
"""Start the disks of an instance.
Modifies the configuration of the instance, so the caller should re-read the
instance configuration, if needed.
"""
disks_ok, _ = AssembleInstanceDisks(lu, instance,
ignore_secondaries=force)
if not disks_ok:
ShutdownInstanceDisks(lu, instance)
if force is not None and not force:
lu.LogWarning("",
hint=("If the message above refers to a secondary node,"
" you can retry the operation using '--force'"))
raise errors.OpExecError("Disk consistency error")
class LUInstanceGrowDisk(LogicalUnit):
"""Grow a disk of an instance.
"""
HPATH = "disk-grow"
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
def ExpandNames(self):
self._ExpandAndLockInstance()
self.needed_locks[locking.LEVEL_NODE] = []
self.needed_locks[locking.LEVEL_NODE_RES] = []
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
self.dont_collate_locks[locking.LEVEL_NODE] = True
self.dont_collate_locks[locking.LEVEL_NODE_RES] = True
def DeclareLocks(self, level):
if level == locking.LEVEL_NODE:
self._LockInstancesNodes()
elif level == locking.LEVEL_NODE_RES:
# Copy node locks
self.needed_locks[locking.LEVEL_NODE_RES] = \
CopyLockList(self.needed_locks[locking.LEVEL_NODE])
def BuildHooksEnv(self):
"""Build hooks env.
This runs on the master, the primary and all the secondaries.
"""
env = {
"DISK": self.op.disk,
"AMOUNT": self.op.amount,
"ABSOLUTE": self.op.absolute,
}
env.update(BuildInstanceHookEnvByObject(self, self.instance))
return env
def BuildHooksNodes(self):
"""Build hooks nodes.
"""
nl = [self.cfg.GetMasterNode()] + \
list(self.cfg.GetInstanceNodes(self.instance.uuid))
return (nl, nl)
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster.
"""
self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
node_uuids = list(self.cfg.GetInstanceNodes(self.instance.uuid))
for node_uuid in node_uuids:
CheckNodeOnline(self, node_uuid)
self.node_es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, node_uuids)
if self.instance.disk_template not in constants.DTS_GROWABLE:
raise errors.OpPrereqError("Instance's disk layout does not support"
" growing", errors.ECODE_INVAL)
self.disk = self.cfg.GetDiskInfo(self.instance.FindDisk(self.op.disk))
if self.op.absolute:
self.target = self.op.amount
self.delta = self.target - self.disk.size
if self.delta < 0:
raise errors.OpPrereqError("Requested size (%s) is smaller than "
"current disk size (%s)" %
(utils.FormatUnit(self.target, "h"),
utils.FormatUnit(self.disk.size, "h")),
errors.ECODE_STATE)
else:
self.delta = self.op.amount
self.target = self.disk.size + self.delta
if self.delta < 0:
raise errors.OpPrereqError("Requested increment (%s) is negative" %
utils.FormatUnit(self.delta, "h"),
errors.ECODE_INVAL)
self._CheckDiskSpace(node_uuids, self.disk.ComputeGrowth(self.delta))
def _CheckDiskSpace(self, node_uuids, req_vgspace):
template = self.instance.disk_template
if (template not in constants.DTS_NO_FREE_SPACE_CHECK and
not any(self.node_es_flags.values())):
# TODO: check the free disk space for file, when that feature will be
# supported
# With exclusive storage we need to do something smarter than just looking
# at free space, which, in the end, is basically a dry run. So we rely on
# the dry run performed in Exec() instead.
CheckNodesFreeDiskPerVG(self, node_uuids, req_vgspace)
def Exec(self, feedback_fn):
"""Execute disk grow.
"""
assert set([self.instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
assert (self.owned_locks(locking.LEVEL_NODE) ==
self.owned_locks(locking.LEVEL_NODE_RES))
wipe_disks = self.cfg.GetClusterInfo().prealloc_wipe_disks
disks_ok, _ = AssembleInstanceDisks(self, self.instance, disks=[self.disk])
if not disks_ok:
raise errors.OpExecError("Cannot activate block device to grow")
feedback_fn("Growing disk %s of instance '%s' by %s to %s" %
(self.op.disk, self.instance.name,
utils.FormatUnit(self.delta, "h"),
utils.FormatUnit(self.target, "h")))
# First run all grow ops in dry-run mode
inst_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)
for node_uuid in inst_nodes:
result = self.rpc.call_blockdev_grow(node_uuid,
(self.disk, self.instance),
self.delta, True, True,
self.node_es_flags[node_uuid])
result.Raise("Dry-run grow request failed to node %s" %
self.cfg.GetNodeName(node_uuid))
if wipe_disks:
# Get disk size from primary node for wiping
result = self.rpc.call_blockdev_getdimensions(
self.instance.primary_node, [([self.disk], self.instance)])
result.Raise("Failed to retrieve disk size from node '%s'" %
self.instance.primary_node)
(disk_dimensions, ) = result.payload
if disk_dimensions is None:
raise errors.OpExecError("Failed to retrieve disk size from primary"
" node '%s'" % self.instance.primary_node)
(disk_size_in_bytes, _) = disk_dimensions
old_disk_size = _DiskSizeInBytesToMebibytes(self, disk_size_in_bytes)
assert old_disk_size >= self.disk.size, \
("Retrieved disk size too small (got %s, should be at least %s)" %
(old_disk_size, self.disk.size))
else:
old_disk_size = None
# We know that (as far as we can test) operations across different
# nodes will succeed, time to run it for real on the backing storage
for node_uuid in inst_nodes:
result = self.rpc.call_blockdev_grow(node_uuid,
(self.disk, self.instance),
self.delta, False, True,
self.node_es_flags[node_uuid])
result.Raise("Grow request failed to node %s" %
self.cfg.GetNodeName(node_uuid))
# And now execute it for logical storage, on the primary node
node_uuid = self.instance.primary_node
result = self.rpc.call_blockdev_grow(node_uuid, (self.disk, self.instance),
self.delta, False, False,
self.node_es_flags[node_uuid])
result.Raise("Grow request failed to node %s" %
self.cfg.GetNodeName(node_uuid))
self.disk.RecordGrow(self.delta)
self.cfg.Update(self.instance, feedback_fn)
self.cfg.Update(self.disk, feedback_fn)
# Changes have been recorded, release node lock
ReleaseLocks(self, locking.LEVEL_NODE)
# Downgrade lock while waiting for sync
self.WConfdClient().DownGradeLocksLevel(
locking.LEVEL_NAMES[locking.LEVEL_INSTANCE])
assert wipe_disks ^ (old_disk_size is None)
if wipe_disks:
inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
assert inst_disks[self.op.disk] == self.disk
# Wipe newly added disk space
WipeDisks(self, self.instance,
disks=[(self.op.disk, self.disk, old_disk_size)])
if self.op.wait_for_sync:
disk_abort = not WaitForSync(self, self.instance, disks=[self.disk])
if disk_abort:
self.LogWarning("Disk syncing has not returned a good status; check"
" the instance")
if not self.instance.disks_active:
_SafeShutdownInstanceDisks(self, self.instance, disks=[self.disk])
elif not self.instance.disks_active:
self.LogWarning("Not shutting down the disk even if the instance is"
" not supposed to be running because no wait for"
" sync mode was requested")
assert self.owned_locks(locking.LEVEL_NODE_RES)
assert set([self.instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
class LUInstanceReplaceDisks(LogicalUnit):
"""Replace the disks of an instance.
"""
HPATH = "mirrors-replace"
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
def CheckArguments(self):
"""Check arguments.
"""
if self.op.mode == constants.REPLACE_DISK_CHG:
if self.op.remote_node is None and self.op.iallocator is None:
raise errors.OpPrereqError("When changing the secondary either an"
" iallocator script must be used or the"
" new node given", errors.ECODE_INVAL)
else:
CheckIAllocatorOrNode(self, "iallocator", "remote_node")
elif self.op.remote_node is not None or self.op.iallocator is not None:
# Not replacing the secondary
raise errors.OpPrereqError("The iallocator and new node options can"
" only be used when changing the"
" secondary node", errors.ECODE_INVAL)
def ExpandNames(self):
self._ExpandAndLockInstance()
assert locking.LEVEL_NODE not in self.needed_locks
assert locking.LEVEL_NODE_RES not in self.needed_locks
assert locking.LEVEL_NODEGROUP not in self.needed_locks
assert self.op.iallocator is None or self.op.remote_node is None, \
"Conflicting options"
if self.op.remote_node is not None:
(self.op.remote_node_uuid, self.op.remote_node) = \
ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
self.op.remote_node)
# Warning: do not remove the locking of the new secondary here
# unless DRBD8Dev.AddChildren is changed to work in parallel;
# currently it doesn't since parallel invocations of
# FindUnusedMinor will conflict
self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node_uuid]
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
else:
self.needed_locks[locking.LEVEL_NODE] = []
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
if self.op.iallocator is not None:
# iallocator will select a new node in the same group
self.needed_locks[locking.LEVEL_NODEGROUP] = []
self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
self.needed_locks[locking.LEVEL_NODE_RES] = []
self.dont_collate_locks[locking.LEVEL_NODEGROUP] = True
self.dont_collate_locks[locking.LEVEL_NODE] = True
self.dont_collate_locks[locking.LEVEL_NODE_RES] = True
self.replacer = TLReplaceDisks(self, self.op.instance_uuid,
self.op.instance_name, self.op.mode,
self.op.iallocator, self.op.remote_node_uuid,
self.op.disks, self.op.early_release,
self.op.ignore_ipolicy)
self.tasklets = [self.replacer]
def DeclareLocks(self, level):
if level == locking.LEVEL_NODEGROUP:
assert self.op.remote_node_uuid is None
assert self.op.iallocator is not None
assert not self.needed_locks[locking.LEVEL_NODEGROUP]
self.share_locks[locking.LEVEL_NODEGROUP] = 1
# Lock all groups used by instance optimistically; this requires going
# via the node before it's locked, requiring verification later on
self.needed_locks[locking.LEVEL_NODEGROUP] = \
self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
elif level == locking.LEVEL_NODE:
if self.op.iallocator is not None:
assert self.op.remote_node_uuid is None
assert not self.needed_locks[locking.LEVEL_NODE]
assert locking.NAL in self.owned_locks(locking.LEVEL_NODE_ALLOC)
# Lock member nodes of all locked groups
self.needed_locks[locking.LEVEL_NODE] = \
[node_uuid
for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
for node_uuid in self.cfg.GetNodeGroup(group_uuid).members]
else:
self._LockInstancesNodes()
elif level == locking.LEVEL_NODE_RES:
# Reuse node locks
self.needed_locks[locking.LEVEL_NODE_RES] = \
self.needed_locks[locking.LEVEL_NODE]
def BuildHooksEnv(self):
"""Build hooks env.
This runs on the master, the primary and all the secondaries.
"""
instance = self.replacer.instance
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance.uuid)
env = {
"MODE": self.op.mode,
"NEW_SECONDARY": self.op.remote_node,
"OLD_SECONDARY": self.cfg.GetNodeName(secondary_nodes[0]),
}
env.update(BuildInstanceHookEnvByObject(self, instance))
return env
def BuildHooksNodes(self):
"""Build hooks nodes.
"""
instance = self.replacer.instance
nl = [
self.cfg.GetMasterNode(),
instance.primary_node,
]
if self.op.remote_node_uuid is not None:
nl.append(self.op.remote_node_uuid)
return nl, nl
def CheckPrereq(self):
"""Check prerequisites.
"""
# Verify if node group locks are still correct
owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
if owned_groups:
CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid, owned_groups)
return LogicalUnit.CheckPrereq(self)
class LUInstanceActivateDisks(NoHooksLU):
"""Bring up an instance's disks.
"""
REQ_BGL = False
def ExpandNames(self):
self._ExpandAndLockInstance()
self.needed_locks[locking.LEVEL_NODE] = []
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
def DeclareLocks(self, level):
if level == locking.LEVEL_NODE:
self._LockInstancesNodes()
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster.
"""
self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
CheckNodeOnline(self, self.instance.primary_node)
def Exec(self, feedback_fn):
"""Activate the disks.
"""
disks_ok, disks_info = \
AssembleInstanceDisks(self, self.instance,
ignore_size=self.op.ignore_size)
if not disks_ok:
raise errors.OpExecError("Cannot activate block devices")
if self.op.wait_for_sync:
if not WaitForSync(self, self.instance):
self.cfg.MarkInstanceDisksInactive(self.instance.uuid)
raise errors.OpExecError("Some disks of the instance are degraded!")
return disks_info
class LUInstanceDeactivateDisks(NoHooksLU):
"""Shutdown an instance's disks.
"""
REQ_BGL = False
def ExpandNames(self):
self._ExpandAndLockInstance()
self.needed_locks[locking.LEVEL_NODE] = []
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
def DeclareLocks(self, level):
if level == locking.LEVEL_NODE:
self._LockInstancesNodes()
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster.
"""
self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
def Exec(self, feedback_fn):
"""Deactivate the disks
"""
if self.op.force:
ShutdownInstanceDisks(self, self.instance)
else:
_SafeShutdownInstanceDisks(self, self.instance)
def _CheckDiskConsistencyInner(lu, instance, dev, node_uuid, on_primary,
ldisk=False):
"""Check that mirrors are not degraded.
@attention: The device has to be annotated already.
The ldisk parameter, if True, will change the test from the
is_degraded attribute (which represents overall non-ok status for
the device(s)) to the ldisk (representing the local storage status).
"""
result = True
if on_primary or dev.AssembleOnSecondary():
rstats = lu.rpc.call_blockdev_find(node_uuid, (dev, instance))
msg = rstats.fail_msg
if msg:
lu.LogWarning("Can't find disk on node %s: %s",
lu.cfg.GetNodeName(node_uuid), msg)
result = False
elif not rstats.payload:
lu.LogWarning("Can't find disk on node %s", lu.cfg.GetNodeName(node_uuid))
result = False
else:
if ldisk:
result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
else:
result = result and not rstats.payload.is_degraded
if dev.children:
for child in dev.children:
result = result and _CheckDiskConsistencyInner(lu, instance, child,
node_uuid, on_primary)
return result
def CheckDiskConsistency(lu, instance, dev, node_uuid, on_primary, ldisk=False):
"""Wrapper around L{_CheckDiskConsistencyInner}.
"""
(disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
return _CheckDiskConsistencyInner(lu, instance, disk, node_uuid, on_primary,
ldisk=ldisk)
def _BlockdevFind(lu, node_uuid, dev, instance):
"""Wrapper around call_blockdev_find to annotate diskparams.
@param lu: A reference to the lu object
@param node_uuid: The node to call out
@param dev: The device to find
@param instance: The instance object the device belongs to
@returns The result of the rpc call
"""
(disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
return lu.rpc.call_blockdev_find(node_uuid, (disk, instance))
def _GenerateUniqueNames(lu, exts):
"""Generate a suitable LV name.
This will generate a logical volume name for the given instance.
"""
results = []
for val in exts:
new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
results.append("%s%s" % (new_id, val))
return results
class TLReplaceDisks(Tasklet):
"""Replaces disks for an instance.
Note: Locking is not within the scope of this class.
"""
def __init__(self, lu, instance_uuid, instance_name, mode, iallocator_name,
remote_node_uuid, disks, early_release, ignore_ipolicy):
"""Initializes this class.
"""
Tasklet.__init__(self, lu)
# Parameters
self.instance_uuid = instance_uuid
self.instance_name = instance_name
self.mode = mode
self.iallocator_name = iallocator_name
self.remote_node_uuid = remote_node_uuid
self.disks = disks
self.early_release = early_release
self.ignore_ipolicy = ignore_ipolicy
# Runtime data
self.instance = None
self.new_node_uuid = None
self.target_node_uuid = None
self.other_node_uuid = None
self.remote_node_info = None
self.node_secondary_ip = None
@staticmethod
def _RunAllocator(lu, iallocator_name, instance_uuid,
relocate_from_node_uuids):
"""Compute a new secondary node using an IAllocator.
"""
req = iallocator.IAReqRelocate(
inst_uuid=instance_uuid,
relocate_from_node_uuids=list(relocate_from_node_uuids))
ial = iallocator.IAllocator(lu.cfg, lu.rpc, req)
ial.Run(iallocator_name)
if not ial.success:
raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
" %s" % (iallocator_name, ial.info),
errors.ECODE_NORES)
remote_node_name = ial.result[0]
remote_node = lu.cfg.GetNodeInfoByName(remote_node_name)
if remote_node is None:
raise errors.OpPrereqError("Node %s not found in configuration" %
remote_node_name, errors.ECODE_NOENT)
lu.LogInfo("Selected new secondary for instance '%s': %s",
instance_uuid, remote_node_name)
return remote_node.uuid
def _FindFaultyDisks(self, node_uuid):
"""Wrapper for L{FindFaultyInstanceDisks}.
"""
return FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
node_uuid, True)
def _CheckDisksActivated(self, instance):
"""Checks if the instance disks are activated.
@param instance: The instance to check disks
@return: True if they are activated, False otherwise
"""
node_uuids = self.cfg.GetInstanceNodes(instance.uuid)
for idx, dev in enumerate(self.cfg.GetInstanceDisks(instance.uuid)):
for node_uuid in node_uuids:
self.lu.LogInfo("Checking disk/%d on %s", idx,
self.cfg.GetNodeName(node_uuid))
result = _BlockdevFind(self, node_uuid, dev, instance)
if result.offline:
continue
elif result.fail_msg or not result.payload:
return False
return True
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster.
"""
self.instance = self.cfg.GetInstanceInfo(self.instance_uuid)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.instance_name
if self.instance.disk_template != constants.DT_DRBD8:
raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
" instances", errors.ECODE_INVAL)
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
if len(secondary_nodes) != 1:
raise errors.OpPrereqError("The instance has a strange layout,"
" expected one secondary but found %d" %
len(secondary_nodes),
errors.ECODE_FAULT)
secondary_node_uuid = secondary_nodes[0]
if self.iallocator_name is None:
remote_node_uuid = self.remote_node_uuid
else:
remote_node_uuid = self._RunAllocator(self.lu, self.iallocator_name,
self.instance.uuid,
secondary_nodes)
if remote_node_uuid is None:
self.remote_node_info = None
else:
assert remote_node_uuid in self.lu.owned_locks(locking.LEVEL_NODE), \
"Remote node '%s' is not locked" % remote_node_uuid
self.remote_node_info = self.cfg.GetNodeInfo(remote_node_uuid)
assert self.remote_node_info is not None, \
"Cannot retrieve locked node %s" % remote_node_uuid
if remote_node_uuid == self.instance.primary_node:
raise errors.OpPrereqError("The specified node is the primary node of"
" the instance", errors.ECODE_INVAL)
if remote_node_uuid == secondary_node_uuid:
raise errors.OpPrereqError("The specified node is already the"
" secondary node of the instance",
errors.ECODE_INVAL)
if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
constants.REPLACE_DISK_CHG):
raise errors.OpPrereqError("Cannot specify disks to be replaced",
errors.ECODE_INVAL)
if self.mode == constants.REPLACE_DISK_AUTO:
if not self._CheckDisksActivated(self.instance):
raise errors.OpPrereqError("Please run activate-disks on instance %s"
" first" % self.instance_name,
errors.ECODE_STATE)
faulty_primary = self._FindFaultyDisks(self.instance.primary_node)
faulty_secondary = self._FindFaultyDisks(secondary_node_uuid)
if faulty_primary and faulty_secondary:
raise errors.OpPrereqError("Instance %s has faulty disks on more than"
" one node and can not be repaired"
" automatically" % self.instance_name,
errors.ECODE_STATE)
if faulty_primary:
self.disks = faulty_primary
self.target_node_uuid = self.instance.primary_node
self.other_node_uuid = secondary_node_uuid
check_nodes = [self.target_node_uuid, self.other_node_uuid]
elif faulty_secondary:
self.disks = faulty_secondary
self.target_node_uuid = secondary_node_uuid
self.other_node_uuid = self.instance.primary_node
check_nodes = [self.target_node_uuid, self.other_node_uuid]
else:
self.disks = []
check_nodes = []
else:
# Non-automatic modes
if self.mode == constants.REPLACE_DISK_PRI:
self.target_node_uuid = self.instance.primary_node
self.other_node_uuid = secondary_node_uuid
check_nodes = [self.target_node_uuid, self.other_node_uuid]
elif self.mode == constants.REPLACE_DISK_SEC:
self.target_node_uuid = secondary_node_uuid
self.other_node_uuid = self.instance.primary_node
check_nodes = [self.target_node_uuid, self.other_node_uuid]
elif self.mode == constants.REPLACE_DISK_CHG:
self.new_node_uuid = remote_node_uuid
self.other_node_uuid = self.instance.primary_node
self.target_node_uuid = secondary_node_uuid
check_nodes = [self.new_node_uuid, self.other_node_uuid]
#
# commands.py - the GraalVM specific commands
#
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
import os, stat, errno, sys, shutil, zipfile, tarfile, tempfile, re, time, datetime, platform, subprocess, multiprocessing, StringIO, socket
from os.path import join, exists, dirname, basename
from argparse import ArgumentParser, RawDescriptionHelpFormatter, REMAINDER
from outputparser import OutputParser, ValuesMatcher
import mx
import xml.dom.minidom
import sanitycheck
import itertools
import json, textwrap
import fnmatch
# This works because when mx loads this file, it makes sure __file__ gets an absolute path
_graal_home = dirname(dirname(__file__))
""" Used to distinguish an exported GraalVM (see 'mx export'). """
_vmSourcesAvailable = exists(join(_graal_home, 'make')) and exists(join(_graal_home, 'src'))
""" The VMs that can be built and run along with an optional description. Only VMs with a
description are listed in the dialogue for setting the default VM (see _get_vm()). """
_vmChoices = {
'graal' : 'Normal compilation is performed with a tiered system (C1 + Graal), Truffle compilation is performed with Graal.',
'server' : 'Normal compilation is performed with a tiered system (C1 + C2), Truffle compilation is performed with Graal. Use this for optimal Truffle performance.',
'client' : None, # normal compilation with client compiler, explicit compilation (e.g., by Truffle) with Graal
'server-nograal' : None, # all compilation with tiered system (i.e., client + server), Graal omitted
'client-nograal' : None, # all compilation with client compiler, Graal omitted
'original' : None, # default VM copied from bootstrap JDK
}
""" The VM that will be run by the 'vm' command and built by default by the 'build' command.
This can be set via the global '--vm' option or the DEFAULT_VM environment variable.
It can also be temporarily set by using a VM context manager object in a 'with' statement. """
_vm = None
""" The VM builds that will be run by the 'vm' command - default is first in list """
_vmbuildChoices = ['product', 'fastdebug', 'debug', 'optimized']
""" The VM build that will be run by the 'vm' command.
This can be set via the global '--vmbuild' option.
It can also be temporarily set by using a VM context manager object in a 'with' statement. """
_vmbuild = _vmbuildChoices[0]
_jacoco = 'off'
""" The current working directory to switch to before running the VM. """
_vm_cwd = None
""" The base directory in which the JDKs cloned from $JAVA_HOME exist. """
_installed_jdks = None
""" Prefix for running the VM. """
_vm_prefix = None
_make_eclipse_launch = False
_minVersion = mx.VersionSpec('1.8')
JDK_UNIX_PERMISSIONS = 0755
def isVMSupported(vm):
if 'client' in vm and len(platform.mac_ver()[0]) != 0:
# Client VM not supported: java launcher on Mac OS X translates '-client' to '-server'
return False
return True
def _get_vm():
"""
Gets the configured VM, presenting a dialogue if there is no currently configured VM.
"""
global _vm
if _vm:
return _vm
vm = mx.get_env('DEFAULT_VM')
if vm is None:
if not sys.stdout.isatty():
mx.abort('Need to specify VM with --vm option or DEFAULT_VM environment variable')
envPath = join(_graal_home, 'mx', 'env')
mx.log('Please select the VM to be executed from the following: ')
items = [k for k in _vmChoices.keys() if _vmChoices[k] is not None]
descriptions = [_vmChoices[k] for k in _vmChoices.keys() if _vmChoices[k] is not None]
vm = mx.select_items(items, descriptions, allowMultiple=False)
if mx.ask_yes_no('Persist this choice by adding "DEFAULT_VM=' + vm + '" to ' + envPath, 'y'):
with open(envPath, 'a') as fp:
print >> fp, 'DEFAULT_VM=' + vm
_vm = vm
return vm
"""
A context manager that can be used with the 'with' statement to set the VM
used by all VM executions within the scope of the 'with' statement. For example:
with VM('server'):
dacapo(['pmd'])
"""
class VM:
def __init__(self, vm=None, build=None):
assert vm is None or vm in _vmChoices.keys()
assert build is None or build in _vmbuildChoices
self.vm = vm if vm else _vm
self.build = build if build else _vmbuild
self.previousVm = _vm
self.previousBuild = _vmbuild
def __enter__(self):
global _vm, _vmbuild
_vm = self.vm
_vmbuild = self.build
def __exit__(self, exc_type, exc_value, traceback):
global _vm, _vmbuild
_vm = self.previousVm
_vmbuild = self.previousBuild
def _chmodDir(chmodFlags, dirname, fnames):
os.chmod(dirname, chmodFlags)
for name in fnames:
os.chmod(os.path.join(dirname, name), chmodFlags)
def chmodRecursive(dirname, chmodFlags):
os.path.walk(dirname, _chmodDir, chmodFlags)
def clean(args):
"""clean the GraalVM source tree"""
opts = mx.clean(args, parser=ArgumentParser(prog='mx clean'))
if opts.native:
def handleRemoveReadonly(func, path, exc):
excvalue = exc[1]
if mx.get_os() == 'windows' and func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:
os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) # 0777
func(path)
else:
raise
def rmIfExists(name):
if os.path.isdir(name):
shutil.rmtree(name, ignore_errors=False, onerror=handleRemoveReadonly)
elif os.path.isfile(name):
os.unlink(name)
rmIfExists(join(_graal_home, 'build'))
rmIfExists(join(_graal_home, 'build-nograal'))
rmIfExists(_jdksDir())
def export(args):
"""create archives of builds split by vmbuild and vm"""
parser = ArgumentParser(prog='mx export')
args = parser.parse_args(args)
# collect data about export
infos = dict()
infos['timestamp'] = time.time()
hgcfg = mx.HgConfig()
hgcfg.check()
infos['revision'] = hgcfg.tip('.') + ('+' if hgcfg.isDirty('.') else '')
# TODO: infos['repository']
infos['jdkversion'] = str(mx.java().version)
infos['architecture'] = _arch()
infos['platform'] = mx.get_os()
if mx.get_os() != 'windows':
pass
# infos['ccompiler']
# infos['linker']
infos['hostname'] = socket.gethostname()
def _writeJson(suffix, properties):
d = infos.copy()
for k, v in properties.iteritems():
assert not d.has_key(k)
d[k] = v
jsonFileName = 'export-' + suffix + '.json'
with open(jsonFileName, 'w') as f:
print >> f, json.dumps(d)
return jsonFileName
def _genFileName(archivtype, middle):
idPrefix = infos['revision'] + '_'
idSuffix = '.tar.gz'
return join(_graal_home, "graalvm_" + archivtype + "_" + idPrefix + middle + idSuffix)
def _genFileArchPlatformName(archivtype, middle):
return _genFileName(archivtype, infos['platform'] + '_' + infos['architecture'] + '_' + middle)
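# e.g. _genFileArchPlatformName('basejdk', 'product') produces a path like
# <graal_home>/graalvm_basejdk_<revision>_linux_amd64_product.tar.gz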
# archive different build types of hotspot
for vmBuild in _vmbuildChoices:
jdkpath = join(_jdksDir(), vmBuild)
if not exists(jdkpath):
mx.logv("skipping " + vmBuild)
continue
tarName = _genFileArchPlatformName('basejdk', vmBuild)
mx.logv("creating basejdk " + tarName)
vmSet = set()
with tarfile.open(tarName, 'w:gz') as tar:
for root, _, files in os.walk(jdkpath):
if basename(root) in _vmChoices.keys():
# TODO: add some assert to check path assumption
vmSet.add(root)
continue
for f in files:
name = join(root, f)
# print name
tar.add(name, name)
n = _writeJson("basejdk-" + vmBuild, {'vmbuild' : vmBuild})
tar.add(n, n)
# create a separate archive for each VM
for vm in vmSet:
bVm = basename(vm)
vmTarName = _genFileArchPlatformName('vm', vmBuild + '_' + bVm)
mx.logv("creating vm " + vmTarName)
debugFiles = set()
with tarfile.open(vmTarName, 'w:gz') as tar:
for root, _, files in os.walk(vm):
for f in files:
# TODO: mac, windows, solaris?
if any(map(f.endswith, [".debuginfo"])):
debugFiles.add(f)
else:
name = join(root, f)
# print name
tar.add(name, name)
n = _writeJson("vm-" + vmBuild + "-" + bVm, {'vmbuild' : vmBuild, 'vm' : bVm})
tar.add(n, n)
if len(debugFiles) > 0:
debugTarName = _genFileArchPlatformName('debugfilesvm', vmBuild + '_' + bVm)
mx.logv("creating debugfilesvm " + debugTarName)
with tarfile.open(debugTarName, 'w:gz') as tar:
for f in debugFiles:
name = join(root, f)
# print name
tar.add(name, name)
n = _writeJson("debugfilesvm-" + vmBuild + "-" + bVm, {'vmbuild' : vmBuild, 'vm' : bVm})
tar.add(n, n)
# graal directory
graalDirTarName = _genFileName('classfiles', 'javac')
mx.logv("creating graal " + graalDirTarName)
with tarfile.open(graalDirTarName, 'w:gz') as tar:
for root, _, files in os.walk("graal"):
for f in [f for f in files if not f.endswith('.java')]:
name = join(root, f)
# print name
tar.add(name, name)
n = _writeJson("graal", {'javacompiler' : 'javac'})
tar.add(n, n)
def _run_benchmark(args, availableBenchmarks, runBenchmark):
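"""run the selected benchmarks ('all' selects every available one) with the given harness and VM options, aborting if any of them fail"""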
vmOpts, benchmarksAndOptions = _extract_VM_args(args, useDoubleDash=availableBenchmarks is None)
if availableBenchmarks is None:
harnessArgs = benchmarksAndOptions
return runBenchmark(None, harnessArgs, vmOpts)
if len(benchmarksAndOptions) == 0:
mx.abort('at least one benchmark name or "all" must be specified')
benchmarks = list(itertools.takewhile(lambda x: not x.startswith('-'), benchmarksAndOptions))
harnessArgs = benchmarksAndOptions[len(benchmarks):]
if 'all' in benchmarks:
benchmarks = availableBenchmarks
else:
for bm in benchmarks:
if bm not in availableBenchmarks:
mx.abort('unknown benchmark: ' + bm + '\nselect one of: ' + str(availableBenchmarks))
failed = []
for bm in benchmarks:
if not runBenchmark(bm, harnessArgs, vmOpts):
failed.append(bm)
if len(failed) != 0:
mx.abort('Benchmark failures: ' + str(failed))
def dacapo(args):
"""run one or more DaCapo benchmarks"""
def launcher(bm, harnessArgs, extraVmOpts):
return sanitycheck.getDacapo(bm, harnessArgs).test(_get_vm(), extraVmOpts=extraVmOpts)
_run_benchmark(args, sanitycheck.dacapoSanityWarmup.keys(), launcher)
def scaladacapo(args):
"""run one or more Scala DaCapo benchmarks"""
def launcher(bm, harnessArgs, extraVmOpts):
return sanitycheck.getScalaDacapo(bm, harnessArgs).test(_get_vm(), extraVmOpts=extraVmOpts)
_run_benchmark(args, sanitycheck.dacapoScalaSanityWarmup.keys(), launcher)
def _arch():
machine = platform.uname()[4]
if machine in ['amd64', 'AMD64', 'x86_64', 'i86pc']:
return 'amd64'
if machine in ['sun4v', 'sun4u']:
return 'sparcv9'
if machine == 'i386' and mx.get_os() == 'darwin':
try:
# Support for Snow Leopard and earlier versions of Mac OS X
if subprocess.check_output(['sysctl', '-n', 'hw.cpu64bit_capable']).strip() == '1':
return 'amd64'
except OSError:
# sysctl is not available
pass
mx.abort('unknown or unsupported architecture: os=' + mx.get_os() + ', machine=' + machine)
def _vmLibDirInJdk(jdk):
"""
Get the directory within a JDK where the server and client
subdirectories are located.
"""
if platform.system() == 'Darwin':
return join(jdk, 'jre', 'lib')
if platform.system() == 'Windows':
return join(jdk, 'jre', 'bin')
return join(jdk, 'jre', 'lib', _arch())
def _vmCfgInJdk(jdk):
"""
Get the jvm.cfg file.
"""
if platform.system() == 'Windows':
return join(jdk, 'jre', 'lib', _arch(), 'jvm.cfg')
return join(_vmLibDirInJdk(jdk), 'jvm.cfg')
def _jdksDir():
return os.path.abspath(join(_installed_jdks if _installed_jdks else _graal_home, 'jdk' + str(mx.java().version)))
def _handle_missing_VM(bld, vm):
mx.log('The ' + bld + ' ' + vm + ' VM has not been created')
if sys.stdout.isatty():
if mx.ask_yes_no('Build it now', 'y'):
with VM(vm, bld):
build([])
return
mx.abort('You need to run "mx --vm ' + vm + ' --vmbuild ' + bld + ' build" to build the selected VM')
def _jdk(build='product', vmToCheck=None, create=False, installGraalJar=True):
"""
Get the JDK into which Graal is installed, creating it first if necessary.
"""
jdk = join(_jdksDir(), build)
if create:
srcJdk = mx.java().jdk
if not exists(jdk):
mx.log('Creating ' + jdk + ' from ' + srcJdk)
shutil.copytree(srcJdk, jdk)
# Make a copy of the default VM so that this JDK can be
# reliably used as the bootstrap for a HotSpot build.
jvmCfg = _vmCfgInJdk(jdk)
if not exists(jvmCfg):
mx.abort(jvmCfg + ' does not exist')
defaultVM = None
jvmCfgLines = []
with open(jvmCfg) as f:
for line in f:
if line.startswith('-') and defaultVM is None:
parts = line.split()
if len(parts) == 2:
assert parts[1] == 'KNOWN', parts[1]
defaultVM = parts[0][1:]
jvmCfgLines += ['# default VM is a copy of the unmodified ' + defaultVM + ' VM\n']
jvmCfgLines += ['-original KNOWN\n']
else:
# skip lines which we cannot parse (e.g. '-hotspot ALIASED_TO -client')
mx.log("WARNING: skipping not parsable line \"" + line + "\"")
else:
jvmCfgLines += [line]
assert defaultVM is not None, 'Could not find default VM in ' + jvmCfg
if mx.get_os() != 'windows':
chmodRecursive(jdk, JDK_UNIX_PERMISSIONS)
shutil.move(join(_vmLibDirInJdk(jdk), defaultVM), join(_vmLibDirInJdk(jdk), 'original'))
with open(jvmCfg, 'w') as fp:
for line in jvmCfgLines:
fp.write(line)
# patch 'release' file (append graalvm revision)
releaseFile = join(jdk, 'release')
if exists(releaseFile):
releaseFileLines = []
with open(releaseFile) as f:
for line in f:
releaseFileLines.append(line)
with open(releaseFile, 'w') as fp:
for line in releaseFileLines:
if line.startswith("SOURCE="):
try:
sourceLine = line[0:-2] # strip the trailing '"' and newline
hgcfg = mx.HgConfig()
hgcfg.check()
revision = hgcfg.tip('.')[:12] # take first 12 chars
fp.write(sourceLine + ' graal:' + revision + '\"\n')
except:
fp.write(line)
else:
fp.write(line)
# Install a copy of the disassembler library
try:
hsdis([], copyToDir=_vmLibDirInJdk(jdk))
except SystemExit:
pass
else:
if not exists(jdk):
if _installed_jdks:
mx.log("The selected JDK directory does not (yet) exist: " + jdk)
_handle_missing_VM(build, vmToCheck if vmToCheck else 'graal')
if installGraalJar:
_installGraalJarInJdks(mx.distribution('GRAAL'))
_installGraalJarInJdks(mx.distribution('GRAAL_LOADER'))
if vmToCheck is not None:
jvmCfg = _vmCfgInJdk(jdk)
found = False
with open(jvmCfg) as f:
for line in f:
if line.strip() == '-' + vmToCheck + ' KNOWN':
found = True
break
if not found:
_handle_missing_VM(build, vmToCheck)
return jdk
def _updateInstalledGraalOptionsFile(jdk):
graalOptions = join(_graal_home, 'graal.options')
jreLibDir = join(jdk, 'jre', 'lib')
if exists(graalOptions):
shutil.copy(graalOptions, join(jreLibDir, 'graal.options'))
else:
toDelete = join(jreLibDir, 'graal.options')
if exists(toDelete):
os.unlink(toDelete)
def _makeHotspotGeneratedSourcesDir():
hsSrcGenDir = join(mx.project('com.oracle.graal.hotspot').source_gen_dir(), 'hotspot')
if not exists(hsSrcGenDir):
os.makedirs(hsSrcGenDir)
return hsSrcGenDir
def _update_graalRuntime_inline_hpp(graalJar):
p = mx.project('com.oracle.graal.hotspot.sourcegen')
mainClass = 'com.oracle.graal.hotspot.sourcegen.GenGraalRuntimeInlineHpp'
if exists(join(p.output_dir(), mainClass.replace('.', os.sep) + '.class')):
graalRuntime_inline_hpp = join(_makeHotspotGeneratedSourcesDir(), 'graalRuntime.inline.hpp')
tmp = StringIO.StringIO()
mx.run_java(['-cp', '{}{}{}'.format(graalJar, os.pathsep, p.output_dir()), mainClass], out=tmp.write)
mx.update_file(graalRuntime_inline_hpp, tmp.getvalue())
def _checkVMIsNewerThanGeneratedSources(jdk, vm, bld):
if isGraalEnabled(vm) and (not _installed_jdks or _installed_jdks == _graal_home):
vmLib = mx.TimeStampFile(join(_vmLibDirInJdk(jdk), vm, mx.add_lib_prefix(mx.add_lib_suffix('jvm'))))
for name in ['graalRuntime.inline.hpp', 'HotSpotVMConfig.inline.hpp']:
genSrc = join(_makeHotspotGeneratedSourcesDir(), name)
if vmLib.isOlderThan(genSrc):
mx.log('The VM ' + vmLib.path + ' is older than ' + genSrc)
mx.abort('You need to run "mx --vm ' + vm + ' --vmbuild ' + bld + ' build"')
def _installGraalJarInJdks(graalDist):
graalJar = graalDist.path
if graalJar.endswith('graal.jar'):
_update_graalRuntime_inline_hpp(graalJar)
jdks = _jdksDir()
if exists(jdks):
for e in os.listdir(jdks):
jreLibDir = join(jdks, e, 'jre', 'lib')
if exists(jreLibDir):
def install(srcJar, dstDir):
name = os.path.basename(srcJar)
dstJar = join(dstDir, name)
if mx.get_env('SYMLINK_GRAAL_JAR', None) == 'true':
# Using symlinks is much faster than copying but may
# cause issues if graal.jar is being updated while
# the VM is running.
if not os.path.islink(dstJar) or not os.path.realpath(dstJar) == srcJar:
if exists(dstJar):
os.remove(dstJar)
os.symlink(srcJar, dstJar)
else:
# do a copy and then a move to get atomic updating (on Unix)
fd, tmp = tempfile.mkstemp(suffix='', prefix=name, dir=dstDir)
shutil.copyfile(srcJar, tmp)
os.close(fd)
shutil.move(tmp, dstJar)
os.chmod(dstJar, JDK_UNIX_PERMISSIONS)
install(graalJar, jreLibDir)
if graalDist.sourcesPath:
install(graalDist.sourcesPath, join(jdks, e))
# run a command in the windows SDK Debug Shell
def _runInDebugShell(cmd, workingDir, logFile=None, findInOutput=None, respondTo=None):
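"""
Run 'cmd' in the Windows SDK debug shell, in directory 'workingDir', optionally logging
all output to 'logFile'. When an output line matches a regular expression key of
'respondTo', the associated response is written to the shell's stdin. Returns True if
'findInOutput' matched a line of output or, when 'findInOutput' is None, if the command
finished with errorlevel 0; returns False otherwise.
"""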
if respondTo is None:
respondTo = {}
newLine = os.linesep
startToken = 'RUNINDEBUGSHELL_STARTSEQUENCE'
endToken = 'RUNINDEBUGSHELL_ENDSEQUENCE'
winSDK = mx.get_env('WIN_SDK', 'C:\\Program Files\\Microsoft SDKs\\Windows\\v7.1\\')
if not exists(winSDK):
mx.abort("Could not find Windows SDK : '" + winSDK + "' does not exist")
if not exists(join(winSDK, 'Bin', 'SetEnv.cmd')):
mx.abort("Invalid Windows SDK path (" + winSDK + ") : could not find Bin/SetEnv.cmd (you can use the WIN_SDK environment variable to specify an other path)")
p = subprocess.Popen('cmd.exe /E:ON /V:ON /K ""' + winSDK + '/Bin/SetEnv.cmd" & echo ' + startToken + '"', \
shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
stdout = p.stdout
stdin = p.stdin
if logFile:
log = open(logFile, 'w')
ret = False
while True:
# encoding may be None on Windows platforms
if sys.stdout.encoding is None:
encoding = 'utf-8'
else:
encoding = sys.stdout.encoding
line = stdout.readline().decode(encoding)
if logFile:
log.write(line.encode('utf-8'))
line = line.strip()
mx.log(line)
if line == startToken:
stdin.write('cd /D ' + workingDir + ' & ' + cmd + ' & echo ' + endToken + newLine)
for regex in respondTo.keys():
match = regex.search(line)
if match:
stdin.write(respondTo[regex] + newLine)
if findInOutput:
match = findInOutput.search(line)
if match:
ret = True
if line == endToken:
if not findInOutput:
stdin.write('echo ERRXXX%errorlevel%' + newLine)
else:
break
if line.startswith('ERRXXX'):
if line == 'ERRXXX0':
ret = True
break
stdin.write('exit' + newLine)
if logFile:
log.close()
return ret
def jdkhome(vm=None):
"""return the JDK directory selected for the 'vm' command"""
build = _vmbuild if _vmSourcesAvailable else 'product'
return _jdk(build, installGraalJar=False)
def print_jdkhome(args, vm=None):
"""print the JDK directory selected for the 'vm' command"""
print jdkhome(vm)
def buildvars(args):
"""describe the variables that can be set by the -D option to the 'mx build' commmand"""
buildVars = {
'ALT_BOOTDIR' : 'The location of the bootstrap JDK installation (default: ' + mx.java().jdk + ')',
'ALT_OUTPUTDIR' : 'Build directory',
'HOTSPOT_BUILD_JOBS' : 'Number of CPUs used by make (default: ' + str(multiprocessing.cpu_count()) + ')',
'INSTALL' : 'Install the built VM into the JDK? (default: y)',
'ZIP_DEBUGINFO_FILES' : 'Install zipped debug symbols file? (default: 0)',
}
mx.log('HotSpot build variables that can be set by the -D option to "mx build":')
mx.log('')
for n in sorted(buildVars.iterkeys()):
mx.log(n)
mx.log(textwrap.fill(buildVars[n], initial_indent=' ', subsequent_indent=' ', width=200))
mx.log('')
mx.log('Note that these variables can be given persistent values in the file ' + join(_graal_home, 'mx', 'env') + ' (see \'mx about\').')
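# Illustrative example: these variables are passed with the -D option of 'mx build',
# e.g. 'mx build -D HOTSPOT_BUILD_JOBS=8 -D INSTALL=y' (the values shown are made up).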
cached_graal_version = None
def graal_version(dev_suffix='dev'):
global cached_graal_version
if not cached_graal_version:
# extract latest release tag for graal
try:
tags = [x.split() for x in subprocess.check_output(['hg', '-R', _graal_home, 'tags']).split('\n') if x.startswith("graal-")]
current_revision = subprocess.check_output(['hg', '-R', _graal_home, 'id', '-i']).strip()
except:
# not a mercurial repository or hg commands are not available.
tags = None
if tags and current_revision:
sorted_tags = sorted(tags, key=lambda e: [int(x) for x in e[0][len("graal-"):].split('.')], reverse=True)
most_recent_tag_name, most_recent_tag_revision = sorted_tags[0]
most_recent_tag_version = most_recent_tag_name[len("graal-"):]
if current_revision == most_recent_tag_revision:
cached_graal_version = most_recent_tag_version
else:
major, minor = map(int, most_recent_tag_version.split('.'))
cached_graal_version = str(major) + '.' + str(minor + 1) + '-' + dev_suffix
else:
cached_graal_version = 'unknown-{}-{}'.format(platform.node(), time.strftime('%Y-%m-%d_%H-%M-%S_%Z'))
return cached_graal_version
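# Worked example (illustrative, with a hypothetical tag history): if the most recent
# release tag is 'graal-0.5' and it points at the current revision, graal_version()
# returns '0.5'; if the working copy has moved past that tag, it returns '0.6-dev'
# (minor version incremented plus the dev suffix); outside a Mercurial repository it
# falls back to an 'unknown-<host>-<timestamp>' string.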
def build(args, vm=None):
"""build the VM binary
The global '--vm' and '--vmbuild' options select which VM type and build target to build."""
# Override to fail quickly if extra arguments are given
# at the end of the command line. This allows for a more
# helpful error message.
class AP(ArgumentParser):
def __init__(self):
ArgumentParser.__init__(self, prog='mx build')
def parse_args(self, args):
result = ArgumentParser.parse_args(self, args)
if len(result.remainder) != 0:
firstBuildTarget = result.remainder[0]
mx.abort('To specify the ' + firstBuildTarget + ' VM build target, you need to use the global "--vmbuild" option. For example:\n' +
' mx --vmbuild ' + firstBuildTarget + ' build')
return result
# Call mx.build to compile the Java sources
parser = AP()
parser.add_argument('--export-dir', help='directory to which graal.jar and graal.options will be copied', metavar='<path>')
parser.add_argument('-D', action='append', help='set a HotSpot build variable (run \'mx buildvars\' to list variables)', metavar='name=value')
opts2 = mx.build(['--source', '1.7'] + args, parser=parser)
assert len(opts2.remainder) == 0
if opts2.export_dir is not None:
if not exists(opts2.export_dir):
os.makedirs(opts2.export_dir)
else:
assert os.path.isdir(opts2.export_dir), '{} is not a directory'.format(opts2.export_dir)
shutil.copy(mx.distribution('GRAAL').path, opts2.export_dir)
shutil.copy(mx.distribution('GRAAL_LOADER').path, opts2.export_dir)
graalOptions = join(_graal_home, 'graal.options')
if exists(graalOptions):
shutil.copy(graalOptions, opts2.export_dir)
if not _vmSourcesAvailable or not opts2.native:
return
builds = [_vmbuild]
if vm is None:
vm = _get_vm()
if vm == 'original':
pass
elif vm.startswith('server'):
buildSuffix = ''
elif vm.startswith('client'):
buildSuffix = '1'
else:
assert vm == 'graal', vm
buildSuffix = 'graal'
if _installed_jdks and _installed_jdks != _graal_home:
if not mx.ask_yes_no("Warning: building while --installed-jdks is set (" + _installed_jdks + ") is not recommanded - are you sure you want to continue", 'n'):
mx.abort(1)
for build in builds:
if build == 'ide-build-target':
build = os.environ.get('IDE_BUILD_TARGET', None)
if build is None or len(build) == 0:
continue
jdk = _jdk(build, create=True)
if vm == 'original':
if build != 'product':
mx.log('only product build of original VM exists')
continue
if not isVMSupported(vm):
mx.log('The ' + vm + ' VM is not supported on this platform - skipping')
continue
vmDir = join(_vmLibDirInJdk(jdk), vm)
if not exists(vmDir):
if mx.get_os() != 'windows':
chmodRecursive(jdk, JDK_UNIX_PERMISSIONS)
mx.log('Creating VM directory in JDK: ' + vmDir)
os.makedirs(vmDir)
def filterXusage(line):
if not 'Xusage.txt' in line:
sys.stderr.write(line + os.linesep)
# Check if a build really needs to be done
timestampFile = join(vmDir, '.build-timestamp')
if opts2.force or not exists(timestampFile):
mustBuild = True
else:
mustBuild = False
timestamp = os.path.getmtime(timestampFile)
sources = []
for d in ['src', 'make', join('graal', 'com.oracle.graal.hotspot', 'src_gen', 'hotspot')]:
for root, dirnames, files in os.walk(join(_graal_home, d)):
# ignore <graal>/src/share/tools
if root == join(_graal_home, 'src', 'share'):
dirnames.remove('tools')
sources += [join(root, name) for name in files]
for f in sources:
if len(f) != 0 and os.path.getmtime(f) > timestamp:
mustBuild = True
break
if not mustBuild:
mx.logv('[all files in src and make directories are older than ' + timestampFile[len(_graal_home) + 1:] + ' - skipping native build]')
continue
if platform.system() == 'Windows':
compilelogfile = _graal_home + '/graalCompile.log'
mksHome = mx.get_env('MKS_HOME', 'C:\\cygwin\\bin')
variant = {'client': 'compiler1', 'server': 'compiler2'}.get(vm, vm)
project_config = variant + '_' + build
_runInDebugShell('msbuild ' + _graal_home + r'\build\vs-amd64\jvm.vcproj /p:Configuration=' + project_config + ' /target:clean', _graal_home)
winCompileCmd = r'set HotSpotMksHome=' + mksHome + r'& set OUT_DIR=' + jdk + r'& set JAVA_HOME=' + jdk + r'& set path=%JAVA_HOME%\bin;%path%;%HotSpotMksHome%& cd /D "' + _graal_home + r'\make\windows"& call create.bat ' + _graal_home
print winCompileCmd
winCompileSuccess = re.compile(r"^Writing \.vcxproj file:")
if not _runInDebugShell(winCompileCmd, _graal_home, compilelogfile, winCompileSuccess):
mx.log('Error executing create command')
return
winBuildCmd = 'msbuild ' + _graal_home + r'\build\vs-amd64\jvm.vcxproj /p:Configuration=' + project_config + ' /p:Platform=x64'
if not _runInDebugShell(winBuildCmd, _graal_home, compilelogfile):
mx.log('Error building project')
return
else:
cpus = multiprocessing.cpu_count()
makeDir = join(_graal_home, 'make')
runCmd = [mx.gmake_cmd(), '-C', makeDir]
env = os.environ.copy()
# These must be passed as environment variables
env.setdefault('LANG', 'C')
env['JAVA_HOME'] = jdk
def setMakeVar(name, default, env=None):
"""Sets a make variable on the command line to the value
of the variable in 'env' with the same name if defined
and 'env' is not None otherwise to 'default'
"""
runCmd.append(name + '=' + (env.get(name, default) if env else default))
if opts2.D:
for nv in opts2.D:
name, value = nv.split('=', 1)
setMakeVar(name.strip(), value)
setMakeVar('ARCH_DATA_MODEL', '64', env=env)
setMakeVar('HOTSPOT_BUILD_JOBS', str(cpus), env=env)
setMakeVar('ALT_BOOTDIR', mx.java().jdk, env=env)
setMakeVar('MAKE_VERBOSE', 'y' if mx._opts.verbose else '')
if vm.endswith('nograal'):
setMakeVar('INCLUDE_GRAAL', 'false')
setMakeVar('ALT_OUTPUTDIR', join(_graal_home, 'build-nograal', mx.get_os()), env=env)
else:
version = graal_version()
setMakeVar('USER_RELEASE_SUFFIX', 'graal-' + version)
setMakeVar('GRAAL_VERSION', version)
setMakeVar('INCLUDE_GRAAL', 'true')
setMakeVar('INSTALL', 'y', env=env)
if mx.get_os() == 'solaris':
# If using sparcWorks, setup flags to avoid make complaining about CC version
cCompilerVersion = subprocess.Popen('CC -V', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).stderr.readlines()[0]
if cCompilerVersion.startswith('CC: Sun C++'):
compilerRev = cCompilerVersion.split(' ')[3]
setMakeVar('ENFORCE_COMPILER_REV', compilerRev, env=env)
setMakeVar('ENFORCE_CC_COMPILER_REV', compilerRev, env=env)
if build == 'jvmg':
# We want ALL the symbols when debugging on Solaris
setMakeVar('STRIP_POLICY', 'no_strip')
# This removes the need to unzip the *.diz files before debugging in gdb
setMakeVar('ZIP_DEBUGINFO_FILES', '0', env=env)
# Clear these 2 variables as having them set can cause very confusing build problems
env.pop('LD_LIBRARY_PATH', None)
env.pop('CLASSPATH', None)
# Issue an env prefix that can be used to run the make on the command line
if not mx._opts.verbose:
mx.log('--------------- make command line ----------------------')
envPrefix = ' '.join([key + '=' + env[key] for key in env.iterkeys() if not os.environ.has_key(key) or env[key] != os.environ[key]])
if len(envPrefix):
mx.log('env ' + envPrefix + ' \\')
runCmd.append(build + buildSuffix)
if not mx._opts.verbose:
mx.log(' '.join(runCmd))
mx.log('--------------------------------------------------------')
mx.run(runCmd, err=filterXusage, env=env)
jvmCfg = _vmCfgInJdk(jdk)
if not exists(jvmCfg):
mx.abort(jvmCfg + ' does not exist')
prefix = '-' + vm + ' '
vmKnown = prefix + 'KNOWN\n'
lines = []
found = False
with open(jvmCfg) as f:
for line in f:
if line.strip() == vmKnown.strip():
found = True
lines.append(line)
if not found:
mx.log('Appending "' + prefix + 'KNOWN" to ' + jvmCfg)
if mx.get_os() != 'windows':
os.chmod(jvmCfg, JDK_UNIX_PERMISSIONS)
with open(jvmCfg, 'w') as f:
for line in lines:
if line.startswith(prefix):
line = vmKnown
found = True
f.write(line)
if not found:
f.write(vmKnown)
if exists(timestampFile):
os.utime(timestampFile, None)
else:
file(timestampFile, 'a')
def vmg(args):
"""run the debug build of VM selected by the '--vm' option"""
return vm(args, vmbuild='debug')
def vmfg(args):
"""run the fastdebug build of VM selected by the '--vm' option"""
return vm(args, vmbuild='fastdebug')
def _parseVmArgs(args, vm=None, cwd=None, vmbuild=None):
"""run the VM selected by the '--vm' option"""
if vm is None:
vm = _get_vm()
if not isVMSupported(vm):
mx.abort('The ' + vm + ' is not supported on this platform')
if cwd is None:
cwd = _vm_cwd
elif _vm_cwd is not None and _vm_cwd != cwd:
mx.abort("conflicting working directories: do not set --vmcwd for this command")
build = vmbuild if vmbuild is not None else _vmbuild if _vmSourcesAvailable else 'product'
jdk = _jdk(build, vmToCheck=vm, installGraalJar=False)
_updateInstalledGraalOptionsFile(jdk)
_checkVMIsNewerThanGeneratedSources(jdk, vm, build)
mx.expand_project_in_args(args)
if _make_eclipse_launch:
mx.make_eclipse_launch(args, 'graal-' + build, name=None, deps=mx.project('com.oracle.graal.hotspot').all_deps([], True))
if _jacoco == 'on' or _jacoco == 'append':
jacocoagent = mx.library("JACOCOAGENT", True)
# Exclude all compiler tests and snippets
excludes = ['com.oracle.graal.compiler.tests.*', 'com.oracle.graal.jtt.*']
for p in mx.projects():
excludes += _find_classes_with_annotations(p, None, ['@Snippet', '@ClassSubstitution', '@Test'], includeInnerClasses=True).keys()
excludes += p.find_classes_with_matching_source_line(None, lambda line: 'JaCoCo Exclude' in line, includeInnerClasses=True).keys()
includes = ['com.oracle.graal.*']
agentOptions = {
'append' : 'true' if _jacoco == 'append' else 'false',
'bootclasspath' : 'true',
'includes' : ':'.join(includes),
'excludes' : ':'.join(excludes),
'destfile' : 'jacoco.exec'
}
args = ['-javaagent:' + jacocoagent.get_path(True) + '=' + ','.join([k + '=' + v for k, v in agentOptions.items()])] + args
exe = join(jdk, 'bin', mx.exe_suffix('java'))
pfx = _vm_prefix.split() if _vm_prefix is not None else []
if '-version' in args:
ignoredArgs = args[args.index('-version') + 1:]
if len(ignoredArgs) > 0:
mx.log("Warning: The following options will be ignored by the vm because they come after the '-version' argument: " + ' '.join(ignoredArgs))
args = mx.java().processArgs(args)
return (pfx, exe, vm, args, cwd)
def vm(args, vm=None, nonZeroIsFatal=True, out=None, err=None, cwd=None, timeout=None, vmbuild=None):
(pfx_, exe_, vm_, args_, cwd) = _parseVmArgs(args, vm, cwd, vmbuild)
return mx.run(pfx_ + [exe_, '-' + vm_] + args_, nonZeroIsFatal=nonZeroIsFatal, out=out, err=err, cwd=cwd, timeout=timeout)
def _find_classes_with_annotations(p, pkgRoot, annotations, includeInnerClasses=False):
"""
Scan the sources of project 'p' for Java source files containing a line that starts with one
of the given 'annotations' (ignoring preceding whitespace) and return a list of the fully
qualified class names of the matching source files.
"""
matches = lambda line: len([a for a in annotations if line == a or line.startswith(a + '(')]) != 0
return p.find_classes_with_matching_source_line(pkgRoot, matches, includeInnerClasses)
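# Illustrative example (not part of the original script): with annotations = ['@Test'],
# a source line that is exactly '@Test' or that starts with '@Test(' matches, and the
# fully qualified name of the enclosing class is included in the returned list.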
def _extract_VM_args(args, allowClasspath=False, useDoubleDash=False, defaultAllVMArgs=True):
"""
Partitions a command line into a leading sequence of HotSpot VM options and the rest.
"""
for i in range(0, len(args)):
if useDoubleDash:
if args[i] == '--':
vmArgs = args[:i]
remainder = args[i + 1:]
return vmArgs, remainder
else:
if not args[i].startswith('-'):
if i != 0 and (args[i - 1] == '-cp' or args[i - 1] == '-classpath'):
if not allowClasspath:
mx.abort('Cannot supply explicit class path option')
else:
continue
vmArgs = args[:i]
remainder = args[i:]
return vmArgs, remainder
if defaultAllVMArgs:
return args, []
else:
return [], args
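# Illustrative sketch (not part of the original script): a small self-contained
# demonstration of the partitioning behaviour; the argument values are made up.
def _example_extract_VM_args():
example = ['-XX:+PrintCompilation', '-esa', 'BC_aload', '-n', '5']
vmArgs, remainder = _extract_VM_args(example)
# 'BC_aload' is the first argument not starting with '-', so everything before it
# is treated as VM options and the rest is left for the test harness.
assert vmArgs == ['-XX:+PrintCompilation', '-esa']
assert remainder == ['BC_aload', '-n', '5']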
def _run_tests(args, harness, annotations, testfile, whitelist, regex):
vmArgs, tests = _extract_VM_args(args)
for t in tests:
if t.startswith('-'):
mx.abort('VM option ' + t + ' must precede ' + tests[0])
candidates = {}
for p in mx.projects_opt_limit_to_suites():
if mx.java().javaCompliance < p.javaCompliance:
continue
for c in _find_classes_with_annotations(p, None, annotations).keys():
candidates[c] = p
classes = []
if len(tests) == 0:
classes = candidates.keys()
projectsCp = mx.classpath([pcp.name for pcp in mx.projects_opt_limit_to_suites() if pcp.javaCompliance <= mx.java().javaCompliance])
else:
projs = set()
if len(tests) == 1 and '#' in tests[0]:
words = tests[0].split('#')
if len(words) != 2:
mx.abort("Method specification is class#method: " + tests[0])
t, method = words
found = False
for c, p in candidates.iteritems():
if t in c:
found = True
classes.append(c + '#' + method)
projs.add(p.name)
if not found:
mx.log('warning: no tests matched by substring "' + t + '"')
else:
for t in tests:
if '#' in t:
mx.abort('Method specifications can only be used in a single test: ' + t)
found = False
for c, p in candidates.iteritems():
if t in c:
found = True
classes.append(c)
projs.add(p.name)
if not found:
mx.log('warning: no tests matched by substring "' + t + '"')
projectsCp = mx.classpath(projs)
if whitelist:
classes = [c for c in classes if any((glob.match(c) for glob in whitelist))]
if regex:
classes = [c for c in classes if re.search(regex, c)]
if len(classes) != 0:
f_testfile = open(testfile, 'w')
for c in classes:
f_testfile.write(c + '\n')
f_testfile.close()
harness(projectsCp, vmArgs)
def _unittest(args, annotations, prefixCp="", whitelist=None, verbose=False, enable_timing=False, regex=None, color=False, eager_stacktrace=False, gc_after_test=False):
testfile = os.environ.get('MX_TESTFILE', None)
if testfile is None:
(_, testfile) = tempfile.mkstemp(".testclasses", "graal")
os.close(_)
coreCp = mx.classpath(['com.oracle.graal.test'])
coreArgs = []
if verbose:
coreArgs.append('-JUnitVerbose')
if enable_timing:
coreArgs.append('-JUnitEnableTiming')
if color:
coreArgs.append('-JUnitColor')
if eager_stacktrace:
coreArgs.append('-JUnitEagerStackTrace')
if gc_after_test:
coreArgs.append('-JUnitGCAfterTest')
def harness(projectsCp, vmArgs):
if _get_vm() != 'graal':
prefixArgs = ['-esa', '-ea']
else:
prefixArgs = ['-XX:-BootstrapGraal', '-esa', '-ea']
if gc_after_test:
prefixArgs.append('-XX:-DisableExplicitGC')
with open(testfile) as fp:
testclasses = [l.rstrip() for l in fp.readlines()]
# Remove entries from class path that are in graal.jar and
# run the VM in a mode where application/test classes can
# access core Graal classes.
cp = prefixCp + coreCp + os.pathsep + projectsCp
if isGraalEnabled(_get_vm()):
graalDist = mx.distribution('GRAAL')
graalJarCp = set([d.output_dir() for d in graalDist.sorted_deps()])
cp = os.pathsep.join([e for e in cp.split(os.pathsep) if e not in graalJarCp])
vmArgs = ['-XX:-UseGraalClassLoader'] + vmArgs
if len(testclasses) == 1:
# Execute Junit directly when one test is being run. This simplifies
# replaying the VM execution in a native debugger (e.g., gdb).
vm(prefixArgs + vmArgs + ['-cp', cp, 'com.oracle.graal.test.GraalJUnitCore'] + coreArgs + testclasses)
else:
vm(prefixArgs + vmArgs + ['-cp', cp, 'com.oracle.graal.test.GraalJUnitCore'] + coreArgs + ['@' + testfile])
try:
_run_tests(args, harness, annotations, testfile, whitelist, regex)
finally:
if os.environ.get('MX_TESTFILE') is None:
os.remove(testfile)
_unittestHelpSuffix = """
Unittest options:
--whitelist <file> run only testcases which are included
in the given whitelist
--verbose enable verbose JUnit output
--enable-timing enable JUnit test timing
--regex <regex> run only testcases matching a regular expression
--color                enable color output
--eager-stacktrace print stacktrace eagerly
--gc-after-test force a GC after each test
To avoid conflicts with VM options '--' can be used as delimiter.
If filters are supplied, only tests whose fully qualified name
includes a filter as a substring are run.
For example, this command line:
mx unittest -G:Dump= -G:MethodFilter=BC_aload.* -G:+PrintCFG BC_aload
will run all JUnit test classes that contain 'BC_aload' in their
fully qualified name and will pass these options to the VM:
-G:Dump= -G:MethodFilter=BC_aload.* -G:+PrintCFG
To get around command line length limitations on some OSes, the
JUnit class names to be executed are written to a file that a
custom JUnit wrapper reads and passes onto JUnit proper. The
MX_TESTFILE environment variable can be set to specify a
file which will not be deleted once the unittests are done
(unlike the temporary file otherwise used).
As with all other commands, using the global '-v' before 'unittest'
command will cause mx to show the complete command line
it uses to run the VM.
"""
def unittest(args):
"""run the JUnit tests (all testcases){0}"""
parser = ArgumentParser(prog='mx unittest',
description='run the JUnit tests',
add_help=False,
formatter_class=RawDescriptionHelpFormatter,
epilog=_unittestHelpSuffix,
)
parser.add_argument('--whitelist', help='run testcases specified in whitelist only', metavar='<path>')
parser.add_argument('--verbose', help='enable verbose JUnit output', action='store_true')
parser.add_argument('--enable-timing', help='enable JUnit test timing', action='store_true')
parser.add_argument('--regex', help='run only testcases matching a regular expression', metavar='<regex>')
parser.add_argument('--color', help='enable color output', action='store_true')
parser.add_argument('--eager-stacktrace', help='print stacktrace eagerly', action='store_true')
parser.add_argument('--gc-after-test', help='force a GC after each test', action='store_true')
ut_args = []
delimiter = False
# check for delimiter
while len(args) > 0:
arg = args.pop(0)
if arg == '--':
delimiter = True
break
ut_args.append(arg)
if delimiter:
# all arguments before '--' must be recognized
parsed_args = parser.parse_args(ut_args)
else:
# parse all known arguments
parsed_args, args = parser.parse_known_args(ut_args)
if parsed_args.whitelist:
try:
with open(join(_graal_home, parsed_args.whitelist)) as fp:
parsed_args.whitelist = [re.compile(fnmatch.translate(l.rstrip())) for l in fp.readlines() if not l.startswith('#')]
except IOError:
mx.log('warning: could not read whitelist: ' + parsed_args.whitelist)
_unittest(args, ['@Test', '@Parameters'], **parsed_args.__dict__)
def shortunittest(args):
"""alias for 'unittest --whitelist test/whitelist_shortunittest.txt'{0}"""
args = ['--whitelist', 'test/whitelist_shortunittest.txt'] + args
unittest(args)
def buildvms(args):
"""build one or more VMs in various configurations"""
vmsDefault = ','.join(_vmChoices.keys())
vmbuildsDefault = ','.join(_vmbuildChoices)
parser = ArgumentParser(prog='mx buildvms')
parser.add_argument('--vms', help='a comma separated list of VMs to build (default: ' + vmsDefault + ')', metavar='<args>', default=vmsDefault)
parser.add_argument('--builds', help='a comma separated list of build types (default: ' + vmbuildsDefault + ')', metavar='<args>', default=vmbuildsDefault)
parser.add_argument('-n', '--no-check', action='store_true', help='omit running "java -version" after each build')
parser.add_argument('-c', '--console', action='store_true', help='send build output to console instead of log file')
args = parser.parse_args(args)
vms = args.vms.split(',')
builds = args.builds.split(',')
allStart = time.time()
for v in vms:
if not isVMSupported(v):
mx.log('The ' + v + ' VM is not supported on this platform - skipping')
continue
for vmbuild in builds:
if v == 'original' and vmbuild != 'product':
continue
if not args.console:
logFile = join(v + '-' + vmbuild + '.log')
log = open(join(_graal_home, logFile), 'wb')
start = time.time()
mx.log('BEGIN: ' + v + '-' + vmbuild + '\t(see: ' + logFile + ')')
# Run as subprocess so that output can be directed to a file
subprocess.check_call([sys.executable, '-u', join('mxtool', 'mx.py'), '--vm', v, '--vmbuild',
vmbuild, 'build'], cwd=_graal_home, stdout=log, stderr=subprocess.STDOUT)
duration = datetime.timedelta(seconds=time.time() - start)
mx.log('END: ' + v + '-' + vmbuild + '\t[' + str(duration) + ']')
else:
with VM(v, vmbuild):
build([])
if not args.no_check:
vmargs = ['-version']
if v == 'graal':
vmargs.insert(0, '-XX:-BootstrapGraal')
vm(vmargs, vm=v, vmbuild=vmbuild)
allDuration = datetime.timedelta(seconds=time.time() - allStart)
mx.log('TOTAL TIME: ' + '[' + str(allDuration) + ']')
class Task:
def __init__(self, title):
self.start = time.time()
self.title = title
self.end = None
self.duration = None
mx.log(time.strftime('gate: %d %b %Y %H:%M:%S: BEGIN: ') + title)
def stop(self):
self.end = time.time()
self.duration = datetime.timedelta(seconds=self.end - self.start)
mx.log(time.strftime('gate: %d %b %Y %H:%M:%S: END: ') + self.title + ' [' + str(self.duration) + ']')
return self
def abort(self, codeOrMessage):
self.end = time.time()
self.duration = datetime.timedelta(seconds=self.end - self.start)
mx.log(time.strftime('gate: %d %b %Y %H:%M:%S: ABORT: ') + self.title + ' [' + str(self.duration) + ']')
mx.abort(codeOrMessage)
return self
def _basic_gate_body(args, tasks):
t = Task('BuildHotSpotGraal: fastdebug,product')
buildvms(['--vms', 'graal,server', '--builds', 'fastdebug,product'])
tasks.append(t.stop())
with VM('graal', 'fastdebug'):
t = Task('BootstrapWithSystemAssertions:fastdebug')
vm(['-esa', '-XX:-TieredCompilation', '-version'])
tasks.append(t.stop())
with VM('graal', 'fastdebug'):
t = Task('BootstrapWithSystemAssertionsNoCoop:fastdebug')
vm(['-esa', '-XX:-TieredCompilation', '-XX:-UseCompressedOops', '-version'])
tasks.append(t.stop())
with VM('graal', 'product'):
t = Task('BootstrapWithGCVerification:product')
out = mx.DuplicateSuppressingStream(['VerifyAfterGC:', 'VerifyBeforeGC:']).write
vm(['-XX:-TieredCompilation', '-XX:+UnlockDiagnosticVMOptions', '-XX:+VerifyBeforeGC', '-XX:+VerifyAfterGC', '-version'], out=out)
tasks.append(t.stop())
with VM('graal', 'product'):
t = Task('BootstrapWithG1GCVerification:product')
out = mx.DuplicateSuppressingStream(['VerifyAfterGC:', 'VerifyBeforeGC:']).write
vm(['-XX:-TieredCompilation', '-XX:+UnlockDiagnosticVMOptions', '-XX:-UseSerialGC', '-XX:+UseG1GC', '-XX:+VerifyBeforeGC', '-XX:+VerifyAfterGC', '-version'], out=out)
tasks.append(t.stop())
with VM('graal', 'product'):
t = Task('BootstrapWithRegisterPressure:product')
vm(['-XX:-TieredCompilation', '-G:RegisterPressure=rbx,r11,r10,r14,xmm3,xmm11,xmm14', '-esa', '-version'])
tasks.append(t.stop())
with VM('graal', 'product'):
t = Task('BootstrapWithImmutableCode:product')
vm(['-XX:-TieredCompilation', '-G:+ImmutableCode', '-G:+VerifyPhases', '-esa', '-version'])
tasks.append(t.stop())
with VM('server', 'product'): # hosted mode
t = Task('UnitTests:hosted-product')
unittest(['--enable-timing', '--verbose'])
tasks.append(t.stop())
with VM('server', 'product'): # hosted mode
t = Task('UnitTests-BaselineCompiler:hosted-product')
unittest(['--enable-timing', '--verbose', '--whitelist', 'test/whitelist_baseline.txt', '-G:+UseBaselineCompiler'])
tasks.append(t.stop())
for vmbuild in ['fastdebug', 'product']:
for test in sanitycheck.getDacapos(level=sanitycheck.SanityCheckLevel.Gate, gateBuildLevel=vmbuild) + sanitycheck.getScalaDacapos(level=sanitycheck.SanityCheckLevel.Gate, gateBuildLevel=vmbuild):
t = Task(str(test) + ':' + vmbuild)
if not test.test('graal'):
t.abort(test.name + ' Failed')
tasks.append(t.stop())
# ensure -Xbatch still works
with VM('graal', 'product'):
t = Task('DaCapo_pmd:BatchMode:product')
dacapo(['-Xbatch', 'pmd'])
tasks.append(t.stop())
# ensure -Xcomp still works
with VM('graal', 'product'):
t = Task('XCompMode:product')
vm(['-Xcomp', '-version'])
tasks.append(t.stop())
if args.jacocout is not None:
jacocoreport([args.jacocout])
global _jacoco
_jacoco = 'off'
t = Task('CleanAndBuildIdealGraphVisualizer')
mx.run(['ant', '-f', join(_graal_home, 'src', 'share', 'tools', 'IdealGraphVisualizer', 'build.xml'), '-q', 'clean', 'build'])
tasks.append(t.stop())
# Prevent Graal modifications from breaking the standard builds
if args.buildNonGraal:
t = Task('BuildHotSpotVarieties')
buildvms(['--vms', 'client,server', '--builds', 'fastdebug,product'])
buildvms(['--vms', 'server-nograal', '--builds', 'product'])
buildvms(['--vms', 'server-nograal', '--builds', 'optimized'])
tasks.append(t.stop())
for vmbuild in ['product', 'fastdebug']:
for theVm in ['client', 'server']:
if not isVMSupported(theVm):
mx.log('The ' + theVm + ' VM is not supported on this platform')
continue
with VM(theVm, vmbuild):
t = Task('DaCapo_pmd:' + theVm + ':' + vmbuild)
dacapo(['pmd'])
tasks.append(t.stop())
t = Task('UnitTests:' + theVm + ':' + vmbuild)
unittest(['-XX:CompileCommand=exclude,*::run*', 'graal.api'])
tasks.append(t.stop())
def gate(args, gate_body=_basic_gate_body):
"""run the tests used to validate a push
If this command exits with a 0 exit code, then the source code is in
a state that would be accepted for integration into the main repository."""
parser = ArgumentParser(prog='mx gate')
parser.add_argument('-j', '--omit-java-clean', action='store_false', dest='cleanJava', help='omit cleaning Java native code')
parser.add_argument('-n', '--omit-native-clean', action='store_false', dest='cleanNative', help='omit cleaning and building native code')
parser.add_argument('-g', '--only-build-graalvm', action='store_false', dest='buildNonGraal', help='only build the Graal VM')
parser.add_argument('--jacocout', help='specify the output directory for jacoco report')
args = parser.parse_args(args)
global _jacoco
tasks = []
total = Task('Gate')
try:
t = Task('Pylint')
mx.pylint([])
tasks.append(t.stop())
def _clean(name='Clean'):
t = Task(name)
cleanArgs = []
if not args.cleanNative:
cleanArgs.append('--no-native')
if not args.cleanJava:
cleanArgs.append('--no-java')
clean(cleanArgs)
tasks.append(t.stop())
_clean()
t = Task('IDEConfigCheck')
mx.ideclean([])
mx.ideinit([])
tasks.append(t.stop())
eclipse_exe = mx.get_env('ECLIPSE_EXE')
if eclipse_exe is not None:
t = Task('CodeFormatCheck')
if mx.eclipseformat(['-e', eclipse_exe]) != 0:
t.abort('Formatter modified files - run "mx eclipseformat", check in changes and repush')
tasks.append(t.stop())
t = Task('Canonicalization Check')
mx.log(time.strftime('%d %b %Y %H:%M:%S - Ensuring mx/projects files are canonicalized...'))
if mx.canonicalizeprojects([]) != 0:
t.abort('Rerun "mx canonicalizeprojects" and check-in the modified mx/projects files.')
tasks.append(t.stop())
if mx.get_env('JDT'):
t = Task('BuildJavaWithEcj')
build(['-p', '--no-native', '--jdt-warning-as-error'])
tasks.append(t.stop())
_clean('CleanAfterEcjBuild')
t = Task('BuildJavaWithJavac')
build(['-p', '--no-native', '--force-javac'])
tasks.append(t.stop())
t = Task('Checkheaders')
if checkheaders([]) != 0:
t.abort('Checkheaders warnings were found')
tasks.append(t.stop())
t = Task('FindBugs')
if findbugs([]) != 0:
t.abort('FindBugs warnings were found')
tasks.append(t.stop())
if exists('jacoco.exec'):
os.unlink('jacoco.exec')
if args.jacocout is not None:
_jacoco = 'append'
else:
_jacoco = 'off'
gate_body(args, tasks)
except KeyboardInterrupt:
total.abort(1)
except BaseException as e:
import traceback
traceback.print_exc()
total.abort(str(e))
total.stop()
mx.log('Gate task times:')
for t in tasks:
mx.log(' ' + str(t.duration) + '\t' + t.title)
mx.log(' =======')
mx.log(' ' + str(total.duration))
def deoptalot(args):
"""bootstrap a fastdebug Graal VM with DeoptimizeALot and VerifyOops on
If the first argument is a number, the process will be repeated
this number of times. All other arguments are passed to the VM."""
count = 1
if len(args) > 0 and args[0].isdigit():
count = int(args[0])
del args[0]
for _ in range(count):
if not vm(['-XX:+DeoptimizeALot', '-XX:+VerifyOops'] + args + ['-version'], vmbuild='fastdebug') == 0:
mx.abort("Failed")
def longtests(args):
deoptalot(['15', '-Xmx48m'])
dacapo(['100', 'eclipse', '-esa'])
def igv(args):
"""run the Ideal Graph Visualizer"""
with open(join(_graal_home, '.ideal_graph_visualizer.log'), 'w') as fp:
# When the http_proxy environment variable is set, convert it to the proxy settings that ant needs
env = os.environ
proxy = os.environ.get('http_proxy')
if not (proxy is None) and len(proxy) > 0:
if '://' in proxy:
# Remove the http:// prefix (or any other protocol prefix)
proxy = proxy.split('://', 1)[1]
# Separate proxy server name and port number
proxyName, proxyPort = proxy.split(':', 1)
proxyEnv = '-DproxyHost="' + proxyName + '" -DproxyPort=' + proxyPort
env['ANT_OPTS'] = proxyEnv
mx.logv('[Ideal Graph Visualizer log is in ' + fp.name + ']')
nbplatform = join(_graal_home, 'src', 'share', 'tools', 'IdealGraphVisualizer', 'nbplatform')
# Remove NetBeans platform if it is earlier than the current supported version
if exists(nbplatform):
dom = xml.dom.minidom.parse(join(nbplatform, 'platform', 'update_tracking', 'org-netbeans-core.xml'))
currentVersion = mx.VersionSpec(dom.getElementsByTagName('module_version')[0].getAttribute('specification_version'))
supportedVersion = mx.VersionSpec('3.43.1')
if currentVersion < supportedVersion:
mx.log('Replacing NetBeans platform version ' + str(currentVersion) + ' with version ' + str(supportedVersion))
shutil.rmtree(nbplatform)
elif supportedVersion < currentVersion:
mx.log('Supported NetBeans version in igv command should be updated to ' + str(currentVersion))
if not exists(nbplatform):
mx.logv('[This execution may take a while as the NetBeans platform needs to be downloaded]')
mx.run(['ant', '-f', join(_graal_home, 'src', 'share', 'tools', 'IdealGraphVisualizer', 'build.xml'), '-l', fp.name, 'run'], env=env)
def install(args):
"""install Truffle into your local Maven repository"""
mx.archive(["@TRUFFLE"])
mx.run(['mvn', 'install:install-file', '-DgroupId=com.oracle', '-DartifactId=truffle', '-Dversion=' + graal_version('SNAPSHOT'), '-Dpackaging=jar', '-Dfile=truffle.jar'])
mx.run(['mvn', 'install:install-file', '-DgroupId=com.oracle', '-DartifactId=truffle-dsl-processor', '-Dversion=' + graal_version('SNAPSHOT'), '-Dpackaging=jar', '-Dfile=truffle-dsl-processor.jar'])
def c1visualizer(args):
"""run the Cl Compiler Visualizer"""
libpath = join(_graal_home, 'lib')
if mx.get_os() == 'windows':
executable = join(libpath, 'c1visualizer', 'bin', 'c1visualizer.exe')
else:
executable = join(libpath, 'c1visualizer', 'bin', 'c1visualizer')
# Check whether the current C1Visualizer installation is up to date
if exists(executable) and not exists(mx.library('C1VISUALIZER_DIST').get_path(resolve=False)):
mx.log('Updating C1Visualizer')
shutil.rmtree(join(libpath, 'c1visualizer'))
archive = mx.library('C1VISUALIZER_DIST').get_path(resolve=True)
if not exists(executable):
zf = zipfile.ZipFile(archive, 'r')
zf.extractall(libpath)
if not exists(executable):
mx.abort('C1Visualizer binary does not exist: ' + executable)
if mx.get_os() != 'windows':
# Make sure that execution is allowed. The zip file does not always specify that correctly
os.chmod(executable, 0777)
mx.run([executable])
def bench(args):
"""run benchmarks and parse their output for results
Results are JSON formatted: {group : {benchmark : score}}."""
resultFile = None
if '-resultfile' in args:
index = args.index('-resultfile')
if index + 1 < len(args):
resultFile = args[index + 1]
del args[index]
del args[index]
else:
mx.abort('-resultfile must be followed by a file name')
vm = _get_vm()
if len(args) == 0:
args = ['all']
vmArgs = [arg for arg in args if arg.startswith('-')]
def benchmarks_in_group(group):
prefix = group + ':'
return [a[len(prefix):] for a in args if a.startswith(prefix)]
results = {}
benchmarks = []
# DaCapo
if 'dacapo' in args or 'all' in args:
benchmarks += sanitycheck.getDacapos(level=sanitycheck.SanityCheckLevel.Benchmark)
else:
dacapos = benchmarks_in_group('dacapo')
for dacapo in dacapos:
if dacapo not in sanitycheck.dacapoSanityWarmup.keys():
mx.abort('Unknown DaCapo : ' + dacapo)
iterations = sanitycheck.dacapoSanityWarmup[dacapo][sanitycheck.SanityCheckLevel.Benchmark]
if iterations > 0:
benchmarks += [sanitycheck.getDacapo(dacapo, iterations)]
if 'scaladacapo' in args or 'all' in args:
benchmarks += sanitycheck.getScalaDacapos(level=sanitycheck.SanityCheckLevel.Benchmark)
else:
scaladacapos = benchmarks_in_group('scaladacapo')
for scaladacapo in scaladacapos:
if scaladacapo not in sanitycheck.dacapoScalaSanityWarmup.keys():
mx.abort('Unknown Scala DaCapo : ' + scaladacapo)
iterations = sanitycheck.dacapoScalaSanityWarmup[scaladacapo][sanitycheck.SanityCheckLevel.Benchmark]
if iterations > 0:
benchmarks += [sanitycheck.getScalaDacapo(scaladacapo, ['-n', str(iterations)])]
# Bootstrap
if 'bootstrap' in args or 'all' in args:
benchmarks += sanitycheck.getBootstraps()
# SPECjvm2008
if 'specjvm2008' in args or 'all' in args:
benchmarks += [sanitycheck.getSPECjvm2008(['-ikv', '-wt', '120', '-it', '120'])]
else:
specjvms = benchmarks_in_group('specjvm2008')
for specjvm in specjvms:
benchmarks += [sanitycheck.getSPECjvm2008(['-ikv', '-wt', '120', '-it', '120', specjvm])]
if 'specjbb2005' in args or 'all' in args:
benchmarks += [sanitycheck.getSPECjbb2005()]
if 'specjbb2013' in args: # or 'all' in args //currently not in default set
benchmarks += [sanitycheck.getSPECjbb2013()]
if 'ctw-full' in args:
benchmarks.append(sanitycheck.getCTW(vm, sanitycheck.CTWMode.Full))
if 'ctw-noinline' in args:
benchmarks.append(sanitycheck.getCTW(vm, sanitycheck.CTWMode.NoInline))
if 'ctw-nocomplex' in args:
benchmarks.append(sanitycheck.getCTW(vm, sanitycheck.CTWMode.NoComplex))
for test in benchmarks:
for (groupName, res) in test.bench(vm, extraVmOpts=vmArgs).items():
group = results.setdefault(groupName, {})
group.update(res)
mx.log(json.dumps(results))
if resultFile:
with open(resultFile, 'w') as f:
f.write(json.dumps(results))
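# Illustrative examples (not part of the original script): 'mx bench dacapo' runs all
# DaCapo benchmarks, while a 'group:name' argument such as 'mx bench dacapo:fop'
# selects a single benchmark from that group (see benchmarks_in_group above). With
# '-resultfile results.json' the JSON results, e.g. {"dacapo": {"fop": <score>}},
# are also written to that file.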
def _get_jmh_path():
path = mx.get_env('JMH_BENCHMARKS', None)
if not path:
probe = join(dirname(_graal_home), 'java-benchmarks')
if exists(probe):
path = probe
if not path:
mx.abort("Please set the JMH_BENCHMARKS environment variable to point to the java-benchmarks workspace")
if not exists(path):
mx.abort("The directory denoted by the JMH_BENCHMARKS environment variable does not exist: " + path)
return path
def makejmhdeps(args):
"""creates and installs Maven dependencies required by the JMH benchmarks
The dependencies are specified by files named pom.mxdeps in the
JMH directory tree. Each such file contains a list of dependencies
defined in JSON format. For example:
'[{"artifactId" : "compiler.test", "groupId" : "com.oracle.graal", "deps" : ["com.oracle.graal.compiler.test"]}]'
will result in a dependency being installed in the local Maven repository
that can be referenced in a pom.xml file as follows:
<dependency>
<groupId>com.oracle.graal</groupId>
<artifactId>compiler.test</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>"""
parser = ArgumentParser(prog='mx makejmhdeps')
parser.add_argument('-s', '--settings', help='alternative path for Maven user settings file', metavar='<path>')
parser.add_argument('-p', '--permissive', action='store_true', help='issue note instead of error if a Maven dependency cannot be built due to missing projects/libraries')
args = parser.parse_args(args)
def makejmhdep(artifactId, groupId, deps):
graalSuite = mx.suite("graal")
path = artifactId + '.jar'
if args.permissive:
for name in deps:
if not mx.project(name, fatalIfMissing=False):
if not mx.library(name, fatalIfMissing=False):
mx.log('Skipping ' + groupId + '.' + artifactId + '.jar as ' + name + ' cannot be resolved')
return
d = mx.Distribution(graalSuite, name=artifactId, path=path, sourcesPath=path, deps=deps, mainClass=None, excludedDependencies=[], distDependencies=[])
d.make_archive()
cmd = ['mvn', 'install:install-file', '-DgroupId=' + groupId, '-DartifactId=' + artifactId,
'-Dversion=1.0-SNAPSHOT', '-Dpackaging=jar', '-Dfile=' + d.path]
if not mx._opts.verbose:
cmd.append('-q')
if args.settings:
cmd = cmd + ['-s', args.settings]
mx.run(cmd)
os.unlink(d.path)
jmhPath = _get_jmh_path()
for root, _, filenames in os.walk(jmhPath):
for f in [join(root, n) for n in filenames if n == 'pom.mxdeps']:
mx.logv('[processing ' + f + ']')
try:
with open(f) as fp:
for d in json.load(fp):
artifactId = d['artifactId']
groupId = d['groupId']
deps = d['deps']
makejmhdep(artifactId, groupId, deps)
except ValueError as e:
mx.abort('Error parsing {}:\n{}'.format(f, e))
def buildjmh(args):
"""build the JMH benchmarks"""
parser = ArgumentParser(prog='mx buildjmh')
parser.add_argument('-s', '--settings', help='alternative path for Maven user settings file', metavar='<path>')
parser.add_argument('-c', action='store_true', dest='clean', help='clean before building')
args = parser.parse_args(args)
jmhPath = _get_jmh_path()
mx.log('JMH benchmarks: ' + jmhPath)
# Ensure the mx injected dependencies are up to date
makejmhdeps(['-p'] + (['-s', args.settings] if args.settings else []))
timestamp = mx.TimeStampFile(join(_graal_home, 'mx', 'jmh', jmhPath.replace(os.sep, '_') + '.timestamp'))
mustBuild = args.clean
if not mustBuild:
try:
hgfiles = [join(jmhPath, f) for f in subprocess.check_output(['hg', '-R', jmhPath, 'locate']).split('\n')]
mustBuild = timestamp.isOlderThan(hgfiles)
except:
# not a Mercurial repository or hg commands are not available.
mustBuild = True
if mustBuild:
buildOutput = []
def _redirect(x):
if mx._opts.verbose:
mx.log(x[:-1])
else:
buildOutput.append(x)
env = os.environ.copy()
env['JAVA_HOME'] = _jdk(vmToCheck='server')
env['MAVEN_OPTS'] = '-server'
mx.log("Building benchmarks...")
cmd = ['mvn']
if args.settings:
cmd = cmd + ['-s', args.settings]
if args.clean:
cmd.append('clean')
cmd.append('package')
retcode = mx.run(cmd, cwd=jmhPath, out=_redirect, env=env, nonZeroIsFatal=False)
if retcode != 0:
mx.log(''.join(buildOutput))
mx.abort(retcode)
timestamp.touch()
else:
mx.logv('[all Mercurial controlled files in ' + jmhPath + ' are older than ' + timestamp.path + ' - skipping build]')
def jmh(args):
"""run the JMH benchmarks
This command respects the standard --vm and --vmbuild options
for choosing which VM to run the benchmarks with."""
if '-h' in args:
mx.help_(['jmh'])
mx.abort(1)
vmArgs, benchmarksAndJsons = _extract_VM_args(args)
benchmarks = [b for b in benchmarksAndJsons if not b.startswith('{')]
jmhArgJsons = [b for b in benchmarksAndJsons if b.startswith('{')]
jmhOutDir = join(_graal_home, 'mx', 'jmh')
if not exists(jmhOutDir):
os.makedirs(jmhOutDir)
jmhOut = join(jmhOutDir, 'jmh.out')
jmhArgs = {'-rff' : jmhOut, '-v' : 'EXTRA' if mx._opts.verbose else 'NORMAL'}
# e.g. '{"-wi" : 20}'
for j in jmhArgJsons:
try:
for n, v in json.loads(j).iteritems():
if v is None:
del jmhArgs[n]
else:
jmhArgs[n] = v
except ValueError as e:
mx.abort('error parsing JSON input: {}\n{}'.format(j, e))
jmhPath = _get_jmh_path()
mx.log('Using benchmarks in ' + jmhPath)
matchedSuites = set()
numBench = [0]
for micros in os.listdir(jmhPath):
absoluteMicro = os.path.join(jmhPath, micros)
if not os.path.isdir(absoluteMicro):
continue
if not micros.startswith("micros-"):
mx.logv('JMH: ignored ' + absoluteMicro + " because it doesn't start with 'micros-'")
continue
microJar = os.path.join(absoluteMicro, "target", "microbenchmarks.jar")
if not exists(microJar):
mx.log('Missing ' + microJar + ' - please run "mx buildjmh"')
continue
if benchmarks:
def _addBenchmark(x):
if x.startswith("Benchmark:"):
return
match = False
for b in benchmarks:
match = match or (b in x)
if match:
numBench[0] += 1
matchedSuites.add(micros)
mx.run_java(['-jar', microJar, "-l"], cwd=jmhPath, out=_addBenchmark, addDefaultArgs=False)
else:
matchedSuites.add(micros)
mx.logv("matchedSuites: " + str(matchedSuites))
plural = 's' if not benchmarks or numBench[0] > 1 else ''
number = str(numBench[0]) if benchmarks else "all"
mx.log("Running " + number + " benchmark" + plural + '...')
regex = []
if benchmarks:
regex.append(r".*(" + "|".join(benchmarks) + ").*")
for suite in matchedSuites:
absoluteMicro = os.path.join(jmhPath, suite)
(pfx, exe, vm, forkedVmArgs, _) = _parseVmArgs(vmArgs)
if pfx:
mx.warn("JMH ignores prefix: \"" + pfx + "\"")
javaArgs = ['-jar', os.path.join(absoluteMicro, "target", "microbenchmarks.jar"),
'--jvm', exe,
'--jvmArgs', ' '.join(["-" + vm] + forkedVmArgs)]
for k, v in jmhArgs.iteritems():
javaArgs.append(k)
if len(str(v)):
javaArgs.append(str(v))
mx.run_java(javaArgs + regex, addDefaultArgs=False, cwd=jmhPath)
def specjvm2008(args):
"""run one or more SPECjvm2008 benchmarks"""
def launcher(bm, harnessArgs, extraVmOpts):
return sanitycheck.getSPECjvm2008(harnessArgs + [bm]).bench(_get_vm(), extraVmOpts=extraVmOpts)
availableBenchmarks = set(sanitycheck.specjvm2008Names)
for name in sanitycheck.specjvm2008Names:
parts = name.rsplit('.', 1)
if len(parts) > 1:
assert len(parts) == 2
group = parts[0]
availableBenchmarks.add(group)
_run_benchmark(args, sorted(availableBenchmarks), launcher)
def specjbb2013(args):
"""runs the composite SPECjbb2013 benchmark"""
def launcher(bm, harnessArgs, extraVmOpts):
assert bm is None
return sanitycheck.getSPECjbb2013(harnessArgs).bench(_get_vm(), extraVmOpts=extraVmOpts)
_run_benchmark(args, None, launcher)
def specjbb2005(args):
"""runs the composite SPECjbb2005 benchmark"""
def launcher(bm, harnessArgs, extraVmOpts):
assert bm is None
return sanitycheck.getSPECjbb2005(harnessArgs).bench(_get_vm(), extraVmOpts=extraVmOpts)
_run_benchmark(args, None, launcher)
def hsdis(args, copyToDir=None):
"""download the hsdis library
This is needed to support HotSpot's assembly dumping features.
By default it downloads the Intel syntax version, use the 'att' argument to install AT&T syntax."""
flavor = 'intel'
if 'att' in args:
flavor = 'att'
lib = mx.add_lib_suffix('hsdis-' + _arch())
path = join(_graal_home, 'lib', lib)
sha1s = {
'att/hsdis-amd64.dll' : 'bcbd535a9568b5075ab41e96205e26a2bac64f72',
'att/hsdis-amd64.so' : '58919ba085d4ef7a513f25bae75e7e54ee73c049',
'intel/hsdis-amd64.dll' : '6a388372cdd5fe905c1a26ced614334e405d1f30',
'intel/hsdis-amd64.so' : '844ed9ffed64fe9599638f29a8450c50140e3192',
'intel/hsdis-amd64.dylib' : 'fdb13ef0d7d23d93dacaae9c98837bea0d4fc5a2',
}
if not exists(path):
flavoredLib = flavor + "/" + lib
sha1 = sha1s[flavoredLib]
sha1path = path + '.sha1'
mx.download_file_with_sha1('hsdis', path, ['http://lafo.ssw.uni-linz.ac.at/hsdis/' + flavoredLib], sha1, sha1path, True, True, sources=False)
if copyToDir is not None and exists(copyToDir):
shutil.copy(path, copyToDir)
def hcfdis(args):
"""disassemble HexCodeFiles embedded in text files
Run a tool over the input files to convert all embedded HexCodeFiles
to a disassembled format."""
parser = ArgumentParser(prog='mx hcfdis')
parser.add_argument('-m', '--map', help='address to symbol map applied to disassembler output')
parser.add_argument('files', nargs=REMAINDER, metavar='files...')
args = parser.parse_args(args)
path = mx.library('HCFDIS').get_path(resolve=True)
mx.run_java(['-cp', path, 'com.oracle.max.hcfdis.HexCodeFileDis'] + args.files)
if args.map is not None:
addressRE = re.compile(r'0[xX]([A-Fa-f0-9]+)')
with open(args.map) as fp:
lines = fp.read().splitlines()
symbols = dict()
for l in lines:
addressAndSymbol = l.split(' ', 1)
if len(addressAndSymbol) == 2:
address, symbol = addressAndSymbol
if address.startswith('0x'):
address = long(address, 16)
symbols[address] = symbol
for f in args.files:
with open(f) as fp:
lines = fp.read().splitlines()
updated = False
for i in range(0, len(lines)):
l = lines[i]
for m in addressRE.finditer(l):
sval = m.group(0)
val = long(sval, 16)
sym = symbols.get(val)
if sym:
l = l.replace(sval, sym)
updated = True
lines[i] = l
if updated:
mx.log('updating ' + f)
with open('new_' + f, "w") as fp:
for l in lines:
print >> fp, l
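# Illustrative sketch (not part of the original script): the file given via '--map' is
# expected to contain one 'address symbol' pair per line, for example
#   0x00007f3c4c01a2e0 StubRoutines::call_stub
# Hexadecimal addresses found in the disassembled files are then replaced by the mapped
# symbol names and the updated content is written to 'new_<file>'.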
def jacocoreport(args):
"""create a JaCoCo coverage report
Creates the report from the 'jacoco.exec' file in the current directory.
Default output directory is 'coverage', but an alternative can be provided as an argument."""
jacocoreport = mx.library("JACOCOREPORT", True)
out = 'coverage'
if len(args) == 1:
out = args[0]
elif len(args) > 1:
mx.abort('jacocoreport takes only one argument : an output directory')
mx.run_java(['-jar', jacocoreport.get_path(True), '--in', 'jacoco.exec', '--out', out] + [p.dir for p in mx.projects()])
def sl(args):
"""run an SL program"""
vmArgs, slArgs = _extract_VM_args(args)
vm(vmArgs + ['-cp', mx.classpath(["TRUFFLE", "com.oracle.truffle.sl"]), "com.oracle.truffle.sl.SLMain"] + slArgs)
def isGraalEnabled(vm):
return vm != 'original' and not vm.endswith('nograal')
def jol(args):
"""Java Object Layout"""
joljar = mx.library('JOL_INTERNALS').get_path(resolve=True)
candidates = mx.findclass(args, logToConsole=False, matcher=lambda s, classname: s == classname or classname.endswith('.' + s) or classname.endswith('$' + s))
if len(candidates) > 10:
print "Found %d candidates. Please be more precise." % (len(candidates))
return
vm(['-javaagent:' + joljar, '-cp', os.pathsep.join([mx.classpath(), joljar]), "org.openjdk.jol.MainObjectInternals"] + candidates)
def site(args):
"""create a website containing javadoc and the project dependency graph"""
return mx.site(['--name', 'Graal',
'--jd', '@-tag', '--jd', '@test:X',
'--jd', '@-tag', '--jd', '@run:X',
'--jd', '@-tag', '--jd', '@bug:X',
'--jd', '@-tag', '--jd', '@summary:X',
'--jd', '@-tag', '--jd', '@vmoption:X',
'--overview', join(_graal_home, 'graal', 'overview.html'),
'--title', 'Graal OpenJDK Project Documentation',
'--dot-output-base', 'projects'] + args)
def generateZshCompletion(args):
"""generate zsh completion for mx"""
try:
from genzshcomp import CompletionGenerator
except ImportError:
mx.abort("install genzshcomp (pip install genzshcomp)")
# need to fake module for the custom mx arg parser, otherwise a check in genzshcomp fails
originalModule = mx._argParser.__module__
mx._argParser.__module__ = "argparse"
generator = CompletionGenerator("mx", mx._argParser)
mx._argParser.__module__ = originalModule
# strip last line and define local variable "ret"
complt = "\n".join(generator.get().split('\n')[0:-1]).replace('context state line', 'context state line ret=1')
# add array of possible subcommands (as they are not part of the argument parser)
complt += '\n ": :->command" \\\n'
complt += ' "*::args:->args" && ret=0\n'
complt += '\n'
complt += 'case $state in\n'
complt += '\t(command)\n'
complt += '\t\tlocal -a main_commands\n'
complt += '\t\tmain_commands=(\n'
for cmd in sorted(mx._commands.iterkeys()):
c, _ = mx._commands[cmd][:2]
doc = c.__doc__
complt += '\t\t\t"{0}'.format(cmd)
if doc:
complt += ':{0}'.format(_fixQuotes(doc.split('\n', 1)[0]))
complt += '"\n'
complt += '\t\t)\n'
complt += '\t\t_describe -t main_commands command main_commands && ret=0\n'
complt += '\t\t;;\n'
complt += '\t(args)\n'
# TODO: improve matcher: if mx args are given, this doesn't work
complt += '\t\tcase $line[1] in\n'
complt += '\t\t\t(vm)\n'
complt += '\t\t\t\tnoglob \\\n'
complt += '\t\t\t\t\t_arguments -s -S \\\n'
complt += _appendOptions("graal", r"G\:")
# TODO: fix -XX:{-,+}Use* flags
complt += _appendOptions("hotspot", r"XX\:")
complt += '\t\t\t\t\t"-version" && ret=0 \n'
complt += '\t\t\t\t;;\n'
complt += '\t\tesac\n'
complt += '\t\t;;\n'
complt += 'esac\n'
complt += '\n'
complt += 'return $ret'
print complt
def _fixQuotes(arg):
return arg.replace('\"', '').replace('\'', '').replace('`', '').replace('{', '\\{').replace('}', '\\}').replace('[', '\\[').replace(']', '\\]')
def _appendOptions(optionType, optionPrefix):
def isBoolean(vmap, field):
return vmap[field] == "Boolean" or vmap[field] == "bool"
def hasDescription(vmap):
return vmap['optDefault'] or vmap['optDoc']
complt = ""
for vmap in _parseVMOptions(optionType):
| complt += '\t\t\t\t\t-"' | 8,122 | lcc_e | python | null | 9d961758e8228e308bc1ef5f04f834404c61ebc3304e17d2 |
|
# -*- coding: utf-8 -*-
"""
auibar contains an implementation of AuiToolBar, which is a completely owner-drawn
toolbar perfectly integrated with the AUI layout system. This allows drag and drop of
toolbars, docking/floating behaviour and the possibility to define "overflow" items
in the toolbar itself.
The default theme that is used is L{AuiDefaultToolBarArt}, which provides a modern,
glossy look and feel. The theme can be changed by calling L{AuiToolBar.SetArtProvider}.
"""
__author__ = "Andrea Gavana <andrea.gavana@gmail.com>"
__date__ = "31 March 2009"
import wx
import types
from aui_utilities import BitmapFromBits, StepColour, GetLabelSize
from aui_utilities import GetBaseColour, MakeDisabledBitmap
import framemanager
from aui_constants import *
# AuiToolBar events
wxEVT_COMMAND_AUITOOLBAR_TOOL_DROPDOWN = wx.NewEventType()
wxEVT_COMMAND_AUITOOLBAR_OVERFLOW_CLICK = wx.NewEventType()
wxEVT_COMMAND_AUITOOLBAR_RIGHT_CLICK = wx.NewEventType()
wxEVT_COMMAND_AUITOOLBAR_MIDDLE_CLICK = wx.NewEventType()
wxEVT_COMMAND_AUITOOLBAR_BEGIN_DRAG = wx.NewEventType()
EVT_AUITOOLBAR_TOOL_DROPDOWN = wx.PyEventBinder(wxEVT_COMMAND_AUITOOLBAR_TOOL_DROPDOWN, 1)
""" A dropdown `AuiToolBarItem` is being shown. """
EVT_AUITOOLBAR_OVERFLOW_CLICK = wx.PyEventBinder(wxEVT_COMMAND_AUITOOLBAR_OVERFLOW_CLICK, 1)
""" The user left-clicked on the overflow button in `AuiToolBar`. """
EVT_AUITOOLBAR_RIGHT_CLICK = wx.PyEventBinder(wxEVT_COMMAND_AUITOOLBAR_RIGHT_CLICK, 1)
""" Fires an event when the user right-clicks on a `AuiToolBarItem`. """
EVT_AUITOOLBAR_MIDDLE_CLICK = wx.PyEventBinder(wxEVT_COMMAND_AUITOOLBAR_MIDDLE_CLICK, 1)
""" Fires an event when the user middle-clicks on a `AuiToolBarItem`. """
EVT_AUITOOLBAR_BEGIN_DRAG = wx.PyEventBinder(wxEVT_COMMAND_AUITOOLBAR_BEGIN_DRAG, 1)
""" A drag operation involving a toolbar item has started. """
# ----------------------------------------------------------------------
class CommandToolBarEvent(wx.PyCommandEvent):
""" A specialized command event class for events sent by L{AuiToolBar}. """
def __init__(self, command_type, win_id):
"""
Default class constructor.
:param `command_type`: the event kind or an instance of `wx.PyCommandEvent`.
:param `win_id`: the window identification number.
"""
if type(command_type) == types.IntType:
wx.PyCommandEvent.__init__(self, command_type, win_id)
else:
wx.PyCommandEvent.__init__(self, command_type.GetEventType(), command_type.GetId())
self.is_dropdown_clicked = False
self.click_pt = wx.Point(-1, -1)
self.rect = wx.Rect(-1, -1, 0, 0)
self.tool_id = -1
def Clone(self):
"""
Returns a copy of the event.
Any event that is posted to the wxPython event system for later action (via
`wx.EvtHandler.AddPendingEvent` or `wx.PostEvent`) must implement this method.
All wxPython events fully implement this method, but any derived events
implemented by the user should also implement this method just in case they
(or some event derived from them) are ever posted.
All wxPython events implement a copy constructor, so the easiest way of
implementing the L{Clone} function is to implement a copy constructor for a new
event (call it `MyEvent`) and then define the L{Clone} function like this::
def Clone(self):
return MyEvent(self)
"""
return CommandToolBarEvent(self)
def IsDropDownClicked(self):
""" Returns whether the drop down menu has been clicked. """
return self.is_dropdown_clicked
def SetDropDownClicked(self, c):
"""
Sets whether the drop down menu has been clicked.
:param `c`: ``True`` to set the drop down as clicked, ``False`` otherwise.
"""
self.is_dropdown_clicked = c
def GetClickPoint(self):
""" Returns the point where the user clicked with the mouse. """
return self.click_pt
def SetClickPoint(self, p):
"""
Sets the clicking point.
:param `p`: a `wx.Point` object.
"""
self.click_pt = p
def GetItemRect(self):
""" Returns the L{AuiToolBarItem} rectangle. """
return self.rect
def SetItemRect(self, r):
"""
Sets the L{AuiToolBarItem} rectangle.
:param `r`: an instance of `wx.Rect`.
"""
self.rect = r
def GetToolId(self):
""" Returns the L{AuiToolBarItem} identifier. """
return self.tool_id
def SetToolId(self, id):
"""
Sets the L{AuiToolBarItem} identifier.
:param `id`: the toolbar item identifier.
"""
self.tool_id = id
# ----------------------------------------------------------------------
class AuiToolBarEvent(CommandToolBarEvent):
""" A specialized command event class for events sent by L{AuiToolBar}. """
def __init__(self, command_type=None, win_id=0):
"""
Default class constructor.
:param `command_type`: the event kind or an instance of `wx.PyCommandEvent`.
:param `win_id`: the window identification number.
"""
CommandToolBarEvent.__init__(self, command_type, win_id)
if type(command_type) == types.IntType:
self.notify = wx.NotifyEvent(command_type, win_id)
else:
self.notify = wx.NotifyEvent(command_type.GetEventType(), command_type.GetId())
def Clone(self):
"""
Returns a copy of the event.
Any event that is posted to the wxPython event system for later action (via
`wx.EvtHandler.AddPendingEvent` or `wx.PostEvent`) must implement this method.
All wxPython events fully implement this method, but any derived events
implemented by the user should also implement this method just in case they
(or some event derived from them) are ever posted.
All wxPython events implement a copy constructor, so the easiest way of
implementing the Clone function is to implement a copy constructor for a new
event (call it MyEvent) and then define the Clone function like this::
def Clone(self):
return MyEvent(self)
"""
return AuiToolBarEvent(self)
def GetNotifyEvent(self):
""" Returns the actual `wx.NotifyEvent`. """
return self.notify
def IsAllowed(self):
""" Returns whether the event is allowed or not. """
return self.notify.IsAllowed()
def Veto(self):
"""
Prevents the change announced by this event from happening.
It is in general a good idea to notify the user about the reasons for
        vetoing the change because otherwise the application's behaviour (which
just refuses to do what the user wants) might be quite surprising.
"""
self.notify.Veto()
def Allow(self):
"""
This is the opposite of L{Veto}: it explicitly allows the event to be
processed. For most events it is not necessary to call this method as the
events are allowed anyhow but some are forbidden by default (this will
be mentioned in the corresponding event description).
"""
self.notify.Allow()
# ----------------------------------------------------------------------
class ToolbarCommandCapture(wx.PyEvtHandler):
""" A class to handle the dropdown window menu. """
def __init__(self):
""" Default class constructor. """
wx.PyEvtHandler.__init__(self)
self._last_id = 0
def GetCommandId(self):
""" Returns the event command identifier. """
return self._last_id
def ProcessEvent(self, event):
"""
Processes an event, searching event tables and calling zero or more suitable
event handler function(s).
:param `event`: the event to process.
:note: Normally, your application would not call this function: it is called
in the wxPython implementation to dispatch incoming user interface events
to the framework (and application).
However, you might need to call it if implementing new functionality (such as
a new control) where you define new event types, as opposed to allowing the
user to override functions.
An instance where you might actually override the L{ProcessEvent} function is where
you want to direct event processing to event handlers not normally noticed by
wxPython. For example, in the document/view architecture, documents and views
are potential event handlers. When an event reaches a frame, L{ProcessEvent} will
need to be called on the associated document and view in case event handler
functions are associated with these objects.
The normal order of event table searching is as follows:
1. If the object is disabled (via a call to `SetEvtHandlerEnabled`) the function
skips to step (6).
2. If the object is a `wx.Window`, L{ProcessEvent} is recursively called on the window's
`wx.Validator`. If this returns ``True``, the function exits.
3. wxWidgets `SearchEventTable` is called for this event handler. If this fails, the
base class table is tried, and so on until no more tables exist or an appropriate
function was found, in which case the function exits.
4. The search is applied down the entire chain of event handlers (usually the chain
has a length of one). If this succeeds, the function exits.
5. If the object is a `wx.Window` and the event is a `wx.CommandEvent`, L{ProcessEvent} is
recursively applied to the parent window's event handler. If this returns ``True``,
the function exits.
6. Finally, L{ProcessEvent} is called on the `wx.App` object.
"""
if event.GetEventType() == wx.wxEVT_COMMAND_MENU_SELECTED:
self._last_id = event.GetId()
return True
if self.GetNextHandler():
return self.GetNextHandler().ProcessEvent(event)
return False
# ----------------------------------------------------------------------
class AuiToolBarItem(object):
"""
AuiToolBarItem is a toolbar element.
It has a unique id (except for the separators which always have id = -1), the
style (telling whether it is a normal button, separator or a control), the
state (toggled or not, enabled or not) and short and long help strings. The
    default implementations use the short help string for the tooltip text, which
    is popped up when the mouse pointer enters the tool, and the long help string
    for the application's status bar.
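    Items are normally created through the ``Add*`` methods of L{AuiToolBar} rather
    than instantiated directly; the returned item can then be configured further.
    A minimal sketch, assuming `tb` is an existing L{AuiToolBar} and `bmp` a valid
    `wx.Bitmap`::

        item = tb.AddSimpleTool(wx.ID_ANY, "Open", bmp)
        item.SetShortHelp("Open a file")
        item.SetLongHelp("Open a file from disk")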
"""
def __init__(self, item=None):
"""
Default class constructor.
:param `item`: another instance of L{AuiToolBarItem}.
"""
if item:
self.Assign(item)
return
self.window = None
self.clockwisebmp = wx.NullBitmap
self.counterclockwisebmp = wx.NullBitmap
self.clockwisedisbmp = wx.NullBitmap
self.counterclockwisedisbmp = wx.NullBitmap
self.sizer_item = None
self.spacer_pixels = 0
self.id = 0
self.kind = ITEM_NORMAL
self.state = 0 # normal, enabled
self.proportion = 0
self.active = True
self.dropdown = True
self.sticky = True
self.user_data = 0
self.label = ""
self.bitmap = wx.NullBitmap
self.disabled_bitmap = wx.NullBitmap
self.hover_bitmap = wx.NullBitmap
self.short_help = ""
self.long_help = ""
self.min_size = wx.Size(-1, -1)
self.alignment = wx.ALIGN_CENTER
self.orientation = AUI_TBTOOL_HORIZONTAL
def Assign(self, c):
"""
Assigns the properties of the L{AuiToolBarItem} `c` to `self`.
:param `c`: another instance of L{AuiToolBarItem}.
"""
self.window = c.window
self.label = c.label
self.bitmap = c.bitmap
self.disabled_bitmap = c.disabled_bitmap
self.hover_bitmap = c.hover_bitmap
self.short_help = c.short_help
self.long_help = c.long_help
self.sizer_item = c.sizer_item
self.min_size = c.min_size
self.spacer_pixels = c.spacer_pixels
self.id = c.id
self.kind = c.kind
self.state = c.state
self.proportion = c.proportion
self.active = c.active
self.dropdown = c.dropdown
self.sticky = c.sticky
self.user_data = c.user_data
self.alignment = c.alignment
self.orientation = c.orientation
def SetWindow(self, w):
"""
Assigns a window to the toolbar item.
:param `w`: an instance of `wx.Window`.
"""
self.window = w
def GetWindow(self):
""" Returns window associated to the toolbar item. """
return self.window
def SetId(self, new_id):
"""
Sets the toolbar item identifier.
:param `new_id`: the new tool id.
"""
self.id = new_id
def GetId(self):
""" Returns the toolbar item identifier. """
return self.id
def SetKind(self, new_kind):
"""
Sets the L{AuiToolBarItem} kind.
:param `new_kind`: can be one of the following items:
======================== =============================
Item Kind Description
======================== =============================
``ITEM_CONTROL`` The item in the AuiToolBar is a control
``ITEM_LABEL`` The item in the AuiToolBar is a text label
``ITEM_SPACER`` The item in the AuiToolBar is a spacer
``ITEM_SEPARATOR`` The item in the AuiToolBar is a separator
``ITEM_CHECK`` The item in the AuiToolBar is a toolbar check item
``ITEM_NORMAL`` The item in the AuiToolBar is a standard toolbar item
``ITEM_RADIO`` The item in the AuiToolBar is a toolbar radio item
======================== =============================
"""
self.kind = new_kind
def GetKind(self):
""" Returns the toolbar item kind. See L{SetKind} for more details. """
return self.kind
def SetState(self, new_state):
"""
Sets the toolbar item state.
:param `new_state`: can be one of the following states:
============================================ ======================================
Button State Constant Description
============================================ ======================================
``AUI_BUTTON_STATE_NORMAL`` Normal button state
``AUI_BUTTON_STATE_HOVER`` Hovered button state
``AUI_BUTTON_STATE_PRESSED`` Pressed button state
``AUI_BUTTON_STATE_DISABLED`` Disabled button state
``AUI_BUTTON_STATE_HIDDEN`` Hidden button state
``AUI_BUTTON_STATE_CHECKED`` Checked button state
============================================ ======================================
"""
self.state = new_state
def GetState(self):
""" Returns the toolbar item state. See L{SetState} for more details. """
return self.state
def SetSizerItem(self, s):
"""
Associates a sizer item to this toolbar item.
:param `s`: an instance of `wx.SizerItem`.
"""
self.sizer_item = s
def GetSizerItem(self):
""" Returns the associated sizer item. """
return self.sizer_item
def SetLabel(self, s):
"""
Sets the toolbar item label.
:param `s`: a string specifying the toolbar item label.
"""
self.label = s
def GetLabel(self):
""" Returns the toolbar item label. """
return self.label
def SetBitmap(self, bmp):
"""
Sets the toolbar item bitmap.
:param `bmp`: an instance of `wx.Bitmap`.
"""
self.bitmap = bmp
def GetBitmap(self):
""" Returns the toolbar item bitmap. """
return self.GetRotatedBitmap(False)
def SetDisabledBitmap(self, bmp):
"""
Sets the toolbar item disabled bitmap.
:param `bmp`: an instance of `wx.Bitmap`.
"""
self.disabled_bitmap = bmp
def GetDisabledBitmap(self):
""" Returns the toolbar item disabled bitmap. """
return self.GetRotatedBitmap(True)
def SetHoverBitmap(self, bmp):
"""
Sets the toolbar item hover bitmap.
:param `bmp`: an instance of `wx.Bitmap`.
"""
self.hover_bitmap = bmp
def SetOrientation(self, a):
"""
Sets the toolbar tool orientation.
:param `a`: one of ``AUI_TBTOOL_HORIZONTAL``, ``AUI_TBTOOL_VERT_CLOCKWISE`` or
``AUI_TBTOOL_VERT_COUNTERCLOCKWISE``.
"""
self.orientation = a
def GetOrientation(self):
""" Returns the toolbar tool orientation. """
return self.orientation
def GetHoverBitmap(self):
""" Returns the toolbar item hover bitmap. """
return self.hover_bitmap
def GetRotatedBitmap(self, disabled):
"""
Returns the correct bitmap depending on the tool orientation.
:param `disabled`: whether to return the disabled bitmap or not.
"""
bitmap_to_rotate = (disabled and [self.disabled_bitmap] or [self.bitmap])[0]
if not bitmap_to_rotate.IsOk() or self.orientation == AUI_TBTOOL_HORIZONTAL:
return bitmap_to_rotate
rotated_bitmap = wx.NullBitmap
clockwise = True
if self.orientation == AUI_TBTOOL_VERT_CLOCKWISE:
rotated_bitmap = (disabled and [self.clockwisedisbmp] or [self.clockwisebmp])[0]
elif self.orientation == AUI_TBTOOL_VERT_COUNTERCLOCKWISE:
rotated_bitmap = (disabled and [self.counterclockwisedisbmp] or [self.counterclockwisebmp])[0]
clockwise = False
if not rotated_bitmap.IsOk():
rotated_bitmap = wx.BitmapFromImage(bitmap_to_rotate.ConvertToImage().Rotate90(clockwise))
return rotated_bitmap
def SetShortHelp(self, s):
"""
Sets the short help string for the L{AuiToolBarItem}, to be displayed in a
        `wx.ToolTip` when the mouse hovers over the toolbar item.
:param `s`: the tool short help string.
"""
self.short_help = s
def GetShortHelp(self):
""" Returns the short help string for the L{AuiToolBarItem}. """
return self.short_help
def SetLongHelp(self, s):
"""
Sets the long help string for the toolbar item. This string is shown in the
statusbar (if any) of the parent frame when the mouse pointer is inside the
tool.
:param `s`: the tool long help string.
"""
self.long_help = s
def GetLongHelp(self):
""" Returns the long help string for the L{AuiToolBarItem}. """
return self.long_help
def SetMinSize(self, s):
"""
Sets the toolbar item minimum size.
:param `s`: an instance of `wx.Size`.
"""
self.min_size = wx.Size(*s)
def GetMinSize(self):
""" Returns the toolbar item minimum size. """
return self.min_size
def SetSpacerPixels(self, s):
"""
Sets the number of pixels for a toolbar item with kind=``ITEM_SEPARATOR``.
:param `s`: number of pixels.
"""
self.spacer_pixels = s
def GetSpacerPixels(self):
""" Returns the number of pixels for a toolbar item with kind=``ITEM_SEPARATOR``. """
return self.spacer_pixels
def SetProportion(self, p):
"""
Sets the L{AuiToolBarItem} proportion in the toolbar.
:param `p`: the item proportion.
"""
self.proportion = p
def GetProportion(self):
""" Returns the L{AuiToolBarItem} proportion in the toolbar. """
return self.proportion
def SetActive(self, b):
"""
Activates/deactivates the toolbar item.
:param `b`: ``True`` to activate the item, ``False`` to deactivate it.
"""
self.active = b
def IsActive(self):
""" Returns whether the toolbar item is active or not. """
return self.active
def SetHasDropDown(self, b):
"""
Sets whether the toolbar item has an associated dropdown menu.
:param `b`: ``True`` to set a dropdown menu, ``False`` otherwise.
"""
self.dropdown = b
def HasDropDown(self):
""" Returns whether the toolbar item has an associated dropdown menu or not. """
return self.dropdown
def SetSticky(self, b):
"""
Sets whether the toolbar item is sticky (permanent highlight after mouse enter)
or not.
:param `b`: ``True`` to set the item as sticky, ``False`` otherwise.
"""
self.sticky = b
def IsSticky(self):
""" Returns whether the toolbar item has a sticky behaviour or not. """
return self.sticky
def SetUserData(self, l):
"""
Associates some kind of user data to the toolbar item.
:param `l`: a Python object.
:note: The user data can be any Python object.
"""
self.user_data = l
def GetUserData(self):
""" Returns the associated user data. """
return self.user_data
def SetAlignment(self, l):
"""
Sets the toolbar item alignment.
:param `l`: the item alignment, which can be one of the available `wx.Sizer`
alignments.
"""
self.alignment = l
def GetAlignment(self):
""" Returns the toolbar item alignment. """
return self.alignment
# ----------------------------------------------------------------------
class AuiDefaultToolBarArt(object):
"""
    Toolbar art provider code - an art provider supplies all drawing functionality to
    the L{AuiToolBar}. This allows the L{AuiToolBar} to have a pluggable look-and-feel.
    By default, a L{AuiToolBar} uses an instance of this class called L{AuiDefaultToolBarArt}
    which provides bitmap art and a colour scheme that is adapted to the major platforms'
    look. You can either derive from that class to alter its behaviour or write a
    completely new toolbar art class. Call L{AuiToolBar.SetArtProvider} to make use of this
    new art provider.
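    A minimal sketch of a derived art provider that only overrides the background
    drawing (all other drawing methods are inherited unchanged)::

        class MyToolBarArt(AuiDefaultToolBarArt):

            def DrawBackground(self, dc, wnd, _rect, horizontal=True):
                dc.SetPen(wx.TRANSPARENT_PEN)
                dc.SetBrush(wx.Brush(wx.Colour(220, 220, 220)))
                dc.DrawRectangleRect(wx.Rect(*_rect))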
"""
def __init__(self):
""" Default class constructor. """
self._base_colour = GetBaseColour()
self._agwFlags = 0
self._text_orientation = AUI_TBTOOL_TEXT_BOTTOM
self._highlight_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT)
self._separator_size = 7
self._orientation = AUI_TBTOOL_HORIZONTAL
self._gripper_size = 7
self._overflow_size = 16
darker1_colour = StepColour(self._base_colour, 85)
darker2_colour = StepColour(self._base_colour, 75)
darker3_colour = StepColour(self._base_colour, 60)
darker4_colour = StepColour(self._base_colour, 50)
darker5_colour = StepColour(self._base_colour, 40)
self._gripper_pen1 = wx.Pen(darker5_colour)
self._gripper_pen2 = wx.Pen(darker3_colour)
self._gripper_pen3 = wx.WHITE_PEN
button_dropdown_bits = "\xe0\xf1\xfb"
overflow_bits = "\x80\xff\x80\xc1\xe3\xf7"
self._button_dropdown_bmp = BitmapFromBits(button_dropdown_bits, 5, 3, wx.BLACK)
self._disabled_button_dropdown_bmp = BitmapFromBits(button_dropdown_bits, 5, 3,
wx.Colour(128, 128, 128))
self._overflow_bmp = BitmapFromBits(overflow_bits, 7, 6, wx.BLACK)
self._disabled_overflow_bmp = BitmapFromBits(overflow_bits, 7, 6, wx.Colour(128, 128, 128))
self._font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
def Clone(self):
""" Clones the L{AuiToolBar} art. """
return AuiDefaultToolBarArt()
def SetAGWFlags(self, agwFlags):
"""
Sets the toolbar art flags.
:param `agwFlags`: a combination of the following values:
==================================== ==================================
Flag name Description
==================================== ==================================
``AUI_TB_TEXT`` Shows the text in the toolbar buttons; by default only icons are shown
``AUI_TB_NO_TOOLTIPS`` Don't show tooltips on `AuiToolBar` items
``AUI_TB_NO_AUTORESIZE`` Do not auto-resize the `AuiToolBar`
``AUI_TB_GRIPPER`` Shows a gripper on the `AuiToolBar`
``AUI_TB_OVERFLOW`` The `AuiToolBar` can contain overflow items
``AUI_TB_VERTICAL`` The `AuiToolBar` is vertical
``AUI_TB_HORZ_LAYOUT`` Shows the text and the icons alongside, not vertically stacked. This style must be used with ``AUI_TB_TEXT``.
``AUI_TB_PLAIN_BACKGROUND`` Don't draw a gradient background on the toolbar
``AUI_TB_HORZ_TEXT`` Combination of ``AUI_TB_HORZ_LAYOUT`` and ``AUI_TB_TEXT``
==================================== ==================================
"""
self._agwFlags = agwFlags
def GetAGWFlags(self):
"""
Returns the L{AuiDefaultToolBarArt} flags. See L{SetAGWFlags} for more
details.
:see: L{SetAGWFlags}
"""
return self._agwFlags
def SetFont(self, font):
"""
Sets the L{AuiDefaultToolBarArt} font.
:param `font`: a `wx.Font` object.
"""
self._font = font
def SetTextOrientation(self, orientation):
"""
Sets the text orientation.
:param `orientation`: can be one of the following constants:
==================================== ==================================
Orientation Switches Description
==================================== ==================================
``AUI_TBTOOL_TEXT_LEFT`` Text in AuiToolBar items is aligned left
``AUI_TBTOOL_TEXT_RIGHT`` Text in AuiToolBar items is aligned right
``AUI_TBTOOL_TEXT_TOP`` Text in AuiToolBar items is aligned top
``AUI_TBTOOL_TEXT_BOTTOM`` Text in AuiToolBar items is aligned bottom
==================================== ==================================
"""
self._text_orientation = orientation
def GetFont(self):
""" Returns the L{AuiDefaultToolBarArt} font. """
return self._font
def GetTextOrientation(self):
"""
Returns the L{AuiDefaultToolBarArt} text orientation. See
L{SetTextOrientation} for more details.
:see: L{SetTextOrientation}
"""
return self._text_orientation
def SetOrientation(self, orientation):
"""
Sets the toolbar tool orientation.
:param `orientation`: one of ``AUI_TBTOOL_HORIZONTAL``, ``AUI_TBTOOL_VERT_CLOCKWISE`` or
``AUI_TBTOOL_VERT_COUNTERCLOCKWISE``.
"""
self._orientation = orientation
def GetOrientation(self):
""" Returns the toolbar orientation. """
return self._orientation
def DrawBackground(self, dc, wnd, _rect, horizontal=True):
"""
Draws a toolbar background with a gradient shading.
:param `dc`: a `wx.DC` device context;
:param `wnd`: a `wx.Window` derived window;
:param `_rect`: the L{AuiToolBar} rectangle;
:param `horizontal`: ``True`` if the toolbar is horizontal, ``False`` if it is vertical.
"""
rect = wx.Rect(*_rect)
start_colour = StepColour(self._base_colour, 180)
end_colour = StepColour(self._base_colour, 85)
reflex_colour = StepColour(self._base_colour, 95)
dc.GradientFillLinear(rect, start_colour, end_colour,
(horizontal and [wx.SOUTH] or [wx.EAST])[0])
left = rect.GetLeft()
right = rect.GetRight()
top = rect.GetTop()
bottom = rect.GetBottom()
dc.SetPen(wx.Pen(reflex_colour))
if horizontal:
dc.DrawLine(left, bottom, right+1, bottom)
else:
dc.DrawLine(right, top, right, bottom+1)
def DrawPlainBackground(self, dc, wnd, _rect):
"""
Draws a toolbar background with a plain colour.
        This method contrasts with the default behaviour of L{AuiToolBar}, which draws
        a background gradient; the gradient can break the window design when the toolbar
        is put inside a control that has a margin between its borders and the toolbar
        (example: an L{AuiToolBar} inside a `wx.StaticBoxSizer` that has a plain background).
:param `dc`: a `wx.DC` device context;
:param `wnd`: a `wx.Window` derived window;
:param `_rect`: the L{AuiToolBar} rectangle.
"""
rect = wx.Rect(*_rect)
rect.height += 1
dc.SetBrush(wx.Brush(wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DFACE)))
dc.DrawRectangle(rect.x - 1, rect.y - 1, rect.width + 2, rect.height + 1)
def DrawLabel(self, dc, wnd, item, rect):
"""
Draws a toolbar item label.
:param `dc`: a `wx.DC` device context;
:param `wnd`: a `wx.Window` derived window;
:param `item`: an instance of L{AuiToolBarItem};
:param `rect`: the L{AuiToolBarItem} rectangle.
"""
dc.SetFont(self._font)
dc.SetTextForeground(wx.BLACK)
orient = item.GetOrientation()
horizontal = orient == AUI_TBTOOL_HORIZONTAL
# we only care about the text height here since the text
# will get cropped based on the width of the item
label_size = GetLabelSize(dc, item.GetLabel(), not horizontal)
text_width = label_size.GetWidth()
text_height = label_size.GetHeight()
if orient == AUI_TBTOOL_HORIZONTAL:
text_x = rect.x
text_y = rect.y + (rect.height-text_height)/2
dc.DrawText(item.GetLabel(), text_x, text_y)
elif orient == AUI_TBTOOL_VERT_CLOCKWISE:
text_x = rect.x + (rect.width+text_width)/2
text_y = rect.y
dc.DrawRotatedText(item.GetLabel(), text_x, text_y, 270)
        elif orient == AUI_TBTOOL_VERT_COUNTERCLOCKWISE:
text_x = rect.x + (rect.width-text_width)/2
text_y = rect.y + text_height
dc.DrawRotatedText(item.GetLabel(), text_x, text_y, 90)
def DrawButton(self, dc, wnd, item, rect):
"""
Draws a toolbar item button.
:param `dc`: a `wx.DC` device context;
:param `wnd`: a `wx.Window` derived window;
:param `item`: an instance of L{AuiToolBarItem};
:param `rect`: the L{AuiToolBarItem} rectangle.
"""
bmp_rect, text_rect = self.GetToolsPosition(dc, item, rect)
if not item.GetState() & AUI_BUTTON_STATE_DISABLED:
if item.GetState() & AUI_BUTTON_STATE_PRESSED:
dc.SetPen(wx.Pen(self._highlight_colour))
dc.SetBrush(wx.Brush(StepColour(self._highlight_colour, 150)))
dc.DrawRectangleRect(rect)
elif item.GetState() & AUI_BUTTON_STATE_HOVER or item.IsSticky():
dc.SetPen(wx.Pen(self._highlight_colour))
dc.SetBrush(wx.Brush(StepColour(self._highlight_colour, 170)))
# draw an even lighter background for checked item hovers (since
# the hover background is the same colour as the check background)
if item.GetState() & AUI_BUTTON_STATE_CHECKED:
dc.SetBrush(wx.Brush(StepColour(self._highlight_colour, 180)))
dc.DrawRectangleRect(rect)
elif item.GetState() & AUI_BUTTON_STATE_CHECKED:
                # it's important to put this code in an else statement after the
# hover, otherwise hovers won't draw properly for checked items
dc.SetPen(wx.Pen(self._highlight_colour))
dc.SetBrush(wx.Brush(StepColour(self._highlight_colour, 170)))
dc.DrawRectangleRect(rect)
if item.GetState() & AUI_BUTTON_STATE_DISABLED:
bmp = item.GetDisabledBitmap()
else:
bmp = item.GetBitmap()
if bmp.IsOk():
dc.DrawBitmap(bmp, bmp_rect.x, bmp_rect.y, True)
# set the item's text colour based on if it is disabled
dc.SetTextForeground(wx.BLACK)
if item.GetState() & AUI_BUTTON_STATE_DISABLED:
dc.SetTextForeground(DISABLED_TEXT_COLOUR)
if self._agwFlags & AUI_TB_TEXT and item.GetLabel() != "":
self.DrawLabel(dc, wnd, item, text_rect)
def DrawDropDownButton(self, dc, wnd, item, rect):
"""
Draws a toolbar dropdown button.
:param `dc`: a `wx.DC` device context;
:param `wnd`: a `wx.Window` derived window;
:param `item`: an instance of L{AuiToolBarItem};
:param `rect`: the L{AuiToolBarItem} rectangle.
"""
dropbmp_x = dropbmp_y = 0
button_rect = wx.Rect(rect.x, rect.y, rect.width-BUTTON_DROPDOWN_WIDTH, rect.height)
dropdown_rect = wx.Rect(rect.x+rect.width-BUTTON_DROPDOWN_WIDTH-1, rect.y, BUTTON_DROPDOWN_WIDTH+1, rect.height)
horizontal = item.GetOrientation() == AUI_TBTOOL_HORIZONTAL
if horizontal:
button_rect = wx.Rect(rect.x, rect.y, rect.width-BUTTON_DROPDOWN_WIDTH, rect.height)
dropdown_rect = wx.Rect(rect.x+rect.width-BUTTON_DROPDOWN_WIDTH-1, rect.y, BUTTON_DROPDOWN_WIDTH+1, rect.height)
else:
button_rect = wx.Rect(rect.x, rect.y, rect.width, rect.height-BUTTON_DROPDOWN_WIDTH)
dropdown_rect = wx.Rect(rect.x, rect.y+rect.height-BUTTON_DROPDOWN_WIDTH-1, rect.width, BUTTON_DROPDOWN_WIDTH+1)
dropbmp_width = self._button_dropdown_bmp.GetWidth()
dropbmp_height = self._button_dropdown_bmp.GetHeight()
if not horizontal:
tmp = dropbmp_width
dropbmp_width = dropbmp_height
dropbmp_height = tmp
dropbmp_x = dropdown_rect.x + (dropdown_rect.width/2) - dropbmp_width/2
dropbmp_y = dropdown_rect.y + (dropdown_rect.height/2) - dropbmp_height/2
bmp_rect, text_rect = self.GetToolsPosition(dc, item, button_rect)
if item.GetState() & AUI_BUTTON_STATE_PRESSED:
dc.SetPen(wx.Pen(self._highlight_colour))
dc.SetBrush(wx.Brush(StepColour(self._highlight_colour, 140)))
dc.DrawRectangleRect(button_rect)
dc.DrawRectangleRect(dropdown_rect)
elif item.GetState() & AUI_BUTTON_STATE_HOVER or item.IsSticky():
dc.SetPen(wx.Pen(self._highlight_colour))
dc.SetBrush(wx.Brush(StepColour(self._highlight_colour, 170)))
dc.DrawRectangleRect(button_rect)
dc.DrawRectangleRect(dropdown_rect)
elif item.GetState() & AUI_BUTTON_STATE_CHECKED:
            # it's important to put this code in an else statement after the
# hover, otherwise hovers won't draw properly for checked items
dc.SetPen(wx.Pen(self._highlight_colour))
            dc.SetBrush(wx.Brush(StepColour(self._highlight_colour, 170)))
            dc.DrawRectangleRect(button_rect)
            dc.DrawRectangleRect(dropdown_rect)
if item.GetState() & AUI_BUTTON_STATE_DISABLED:
bmp = item.GetDisabledBitmap()
dropbmp = self._disabled_button_dropdown_bmp
else:
bmp = item.GetBitmap()
dropbmp = self._button_dropdown_bmp
if not bmp.IsOk():
return
dc.DrawBitmap(bmp, bmp_rect.x, bmp_rect.y, True)
if horizontal:
dc.DrawBitmap(dropbmp, dropbmp_x, dropbmp_y, True)
else:
dc.DrawBitmap(wx.BitmapFromImage(dropbmp.ConvertToImage().Rotate90(item.GetOrientation() == AUI_TBTOOL_VERT_CLOCKWISE)),
dropbmp_x, dropbmp_y, True)
# set the item's text colour based on if it is disabled
dc.SetTextForeground(wx.BLACK)
if item.GetState() & AUI_BUTTON_STATE_DISABLED:
dc.SetTextForeground(DISABLED_TEXT_COLOUR)
if self._agwFlags & AUI_TB_TEXT and item.GetLabel() != "":
self.DrawLabel(dc, wnd, item, text_rect)
def DrawControlLabel(self, dc, wnd, item, rect):
"""
Draws a label for a toolbar control.
:param `dc`: a `wx.DC` device context;
:param `wnd`: a `wx.Window` derived window;
:param `item`: an instance of L{AuiToolBarItem};
:param `rect`: the L{AuiToolBarItem} rectangle.
"""
label_size = GetLabelSize(dc, item.GetLabel(), item.GetOrientation() != AUI_TBTOOL_HORIZONTAL)
text_height = label_size.GetHeight()
text_width = label_size.GetWidth()
dc.SetFont(self._font)
if self._agwFlags & AUI_TB_TEXT:
tx, text_height = dc.GetTextExtent("ABCDHgj")
text_width, ty = dc.GetTextExtent(item.GetLabel())
# don't draw the label if it is wider than the item width
if text_width > rect.width:
return
# set the label's text colour
dc.SetTextForeground(wx.BLACK)
text_x = rect.x + (rect.width/2) - (text_width/2) + 1
text_y = rect.y + rect.height - text_height - 1
if self._agwFlags & AUI_TB_TEXT and item.GetLabel() != "":
dc.DrawText(item.GetLabel(), text_x, text_y)
def GetLabelSize(self, dc, wnd, item):
"""
Returns the label size for a toolbar item.
:param `dc`: a `wx.DC` device context;
:param `wnd`: a `wx.Window` derived window;
:param `item`: an instance of L{AuiToolBarItem}.
"""
dc.SetFont(self._font)
label_size = GetLabelSize(dc, item.GetLabel(), self._orientation != AUI_TBTOOL_HORIZONTAL)
return wx.Size(item.GetMinSize().GetWidth(), label_size.GetHeight())
def GetToolSize(self, dc, wnd, item):
"""
Returns the toolbar item size.
:param `dc`: a `wx.DC` device context;
:param `wnd`: a `wx.Window` derived window;
:param `item`: an instance of L{AuiToolBarItem}.
"""
if not item.GetBitmap().IsOk() and not self._agwFlags & AUI_TB_TEXT:
return wx.Size(16, 16)
width = item.GetBitmap().GetWidth()
height = item.GetBitmap().GetHeight()
if self._agwFlags & AUI_TB_TEXT:
dc.SetFont(self._font)
label_size = GetLabelSize(dc, item.GetLabel(), self.GetOrientation() != AUI_TBTOOL_HORIZONTAL)
padding = 6
if self._text_orientation == AUI_TBTOOL_TEXT_BOTTOM:
if self.GetOrientation() != AUI_TBTOOL_HORIZONTAL:
height += 3 # space between top border and bitmap
height += 3 # space between bitmap and text
padding = 0
height += label_size.GetHeight()
if item.GetLabel() != "":
width = max(width, label_size.GetWidth()+padding)
elif self._text_orientation == AUI_TBTOOL_TEXT_RIGHT and item.GetLabel() != "":
if self.GetOrientation() == AUI_TBTOOL_HORIZONTAL:
width += 3 # space between left border and bitmap
width += 3 # space between bitmap and text
padding = 0
width += label_size.GetWidth()
height = max(height, label_size.GetHeight()+padding)
# if the tool has a dropdown button, add it to the width
if item.HasDropDown():
if item.GetOrientation() == AUI_TBTOOL_HORIZONTAL:
width += BUTTON_DROPDOWN_WIDTH+4
else:
height += BUTTON_DROPDOWN_WIDTH+4
return wx.Size(width, height)
def DrawSeparator(self, dc, wnd, _rect):
"""
Draws a toolbar separator.
:param `dc`: a `wx.DC` device context;
:param `wnd`: a `wx.Window` derived window;
:param `_rect`: the L{AuiToolBarItem} rectangle.
"""
horizontal = True
if self._agwFlags & AUI_TB_VERTICAL:
horizontal = False
rect = wx.Rect(*_rect)
if horizontal:
rect.x += (rect.width/2)
rect.width = 1
new_height = (rect.height*3)/4
rect.y += (rect.height/2) - (new_height/2)
rect.height = new_height
else:
rect.y += (rect.height/2)
rect.height = 1
new_width = (rect.width*3)/4
rect.x += (rect.width/2) - (new_width/2)
rect.width = new_width
start_colour = StepColour(self._base_colour, 80)
end_colour = StepColour(self._base_colour, 80)
dc.GradientFillLinear(rect, start_colour, end_colour, (horizontal and [wx.SOUTH] or [wx.EAST])[0])
def DrawGripper(self, dc, wnd, rect):
"""
Draws the toolbar gripper.
:param `dc`: a `wx.DC` device context;
:param `wnd`: a `wx.Window` derived window;
:param `rect`: the L{AuiToolBar} rectangle.
"""
i = 0
while 1:
if self._agwFlags & AUI_TB_VERTICAL:
x = rect.x + (i*4) + 4
y = rect.y + 3
if x > rect.GetWidth() - 4:
break
else:
x = rect.x + 3
y = rect.y + (i*4) + 4
if y > rect.GetHeight() - 4:
break
dc.SetPen(self._gripper_pen1)
dc.DrawPoint(x, y)
dc.SetPen(self._gripper_pen2)
dc.DrawPoint(x, y+1)
dc.DrawPoint(x+1, y)
dc.SetPen(self._gripper_pen3)
dc.DrawPoint(x+2, y+1)
dc.DrawPoint(x+2, y+2)
dc.DrawPoint(x+1, y+2)
i += 1
def DrawOverflowButton(self, dc, wnd, rect, state):
"""
Draws the overflow button for the L{AuiToolBar}.
:param `dc`: a `wx.DC` device context;
:param `wnd`: a `wx.Window` derived window;
:param `rect`: the L{AuiToolBar} rectangle;
:param `state`: the overflow button state.
"""
if state & AUI_BUTTON_STATE_HOVER or state & AUI_BUTTON_STATE_PRESSED:
cli_rect = wnd.GetClientRect()
light_gray_bg = StepColour(self._highlight_colour, 170)
if self._agwFlags & AUI_TB_VERTICAL:
dc.SetPen(wx.Pen(self._highlight_colour))
dc.DrawLine(rect.x, rect.y, rect.x+rect.width, rect.y)
dc.SetPen(wx.Pen(light_gray_bg))
dc.SetBrush(wx.Brush(light_gray_bg))
dc.DrawRectangle(rect.x, rect.y+1, rect.width, rect.height)
else:
dc.SetPen(wx.Pen(self._highlight_colour))
dc.DrawLine(rect.x, rect.y, rect.x, rect.y+rect.height)
dc.SetPen(wx.Pen(light_gray_bg))
dc.SetBrush(wx.Brush(light_gray_bg))
dc.DrawRectangle(rect.x+1, rect.y, rect.width, rect.height)
x = rect.x + 1 + (rect.width-self._overflow_bmp.GetWidth())/2
y = rect.y + 1 + (rect.height-self._overflow_bmp.GetHeight())/2
dc.DrawBitmap(self._overflow_bmp, x, y, True)
def GetElementSize(self, element_id):
"""
Returns the size of a UI element in the L{AuiToolBar}.
:param `element_id`: can be one of the following:
==================================== ==================================
Element Identifier Description
==================================== ==================================
``AUI_TBART_SEPARATOR_SIZE`` Separator size in AuiToolBar
``AUI_TBART_GRIPPER_SIZE`` Gripper size in AuiToolBar
``AUI_TBART_OVERFLOW_SIZE`` Overflow button size in AuiToolBar
==================================== ==================================
"""
if element_id == AUI_TBART_SEPARATOR_SIZE:
return self._separator_size
elif element_id == AUI_TBART_GRIPPER_SIZE:
return self._gripper_size
elif element_id == AUI_TBART_OVERFLOW_SIZE:
return self._overflow_size
return 0
def SetElementSize(self, element_id, size):
"""
Sets the size of a UI element in the L{AuiToolBar}.
:param `element_id`: can be one of the following:
==================================== ==================================
Element Identifier Description
==================================== ==================================
``AUI_TBART_SEPARATOR_SIZE`` Separator size in AuiToolBar
``AUI_TBART_GRIPPER_SIZE`` Gripper size in AuiToolBar
``AUI_TBART_OVERFLOW_SIZE`` Overflow button size in AuiToolBar
==================================== ==================================
:param `size`: the new size of the UI element.
"""
if element_id == AUI_TBART_SEPARATOR_SIZE:
self._separator_size = size
elif element_id == AUI_TBART_GRIPPER_SIZE:
self._gripper_size = size
elif element_id == AUI_TBART_OVERFLOW_SIZE:
self._overflow_size = size
def ShowDropDown(self, wnd, items):
"""
Shows the drop down window menu for overflow items.
:param `wnd`: an instance of `wx.Window`;
:param `items`: the overflow toolbar items (a Python list).
"""
menuPopup = wx.Menu()
items_added = 0
for item in items:
if item.GetKind() not in [ITEM_SEPARATOR, ITEM_SPACER, ITEM_CONTROL]:
text = item.GetShortHelp()
if text == "":
text = item.GetLabel()
if text == "":
text = " "
kind = item.GetKind()
m = wx.MenuItem(menuPopup, item.GetId(), text, item.GetShortHelp(), kind)
orientation = item.GetOrientation()
item.SetOrientation(AUI_TBTOOL_HORIZONTAL)
if kind not in [ITEM_CHECK, ITEM_RADIO]:
m.SetBitmap(item.GetBitmap())
item.SetOrientation(orientation)
menuPopup.AppendItem(m)
if kind in [ITEM_CHECK, ITEM_RADIO]:
state = (item.state & AUI_BUTTON_STATE_CHECKED and [True] or [False])[0]
m.Check(state)
items_added += 1
else:
if items_added > 0 and item.GetKind() == ITEM_SEPARATOR:
menuPopup.AppendSeparator()
# find out where to put the popup menu of window items
pt = wx.GetMousePosition()
pt = wnd.ScreenToClient(pt)
# find out the screen coordinate at the bottom of the tab ctrl
cli_rect = wnd.GetClientRect()
pt.y = cli_rect.y + cli_rect.height
cc = ToolbarCommandCapture()
wnd.PushEventHandler(cc)
# Adjustments to get slightly better menu placement
if wx.Platform == "__WXMAC__":
pt.y += 5
pt.x -= 5
wnd.PopupMenu(menuPopup, pt)
command = cc.GetCommandId()
wnd.PopEventHandler(True)
return command
def GetToolsPosition(self, dc, item, rect):
"""
Returns the bitmap and text rectangles for a toolbar item.
:param `dc`: a `wx.DC` device context;
:param `item`: an instance of L{AuiToolBarItem};
:param `rect`: the tool rect.
"""
text_width = text_height = 0
horizontal = self._orientation == AUI_TBTOOL_HORIZONTAL
text_bottom = self._text_orientation == AUI_TBTOOL_TEXT_BOTTOM
text_right = self._text_orientation == AUI_TBTOOL_TEXT_RIGHT
bmp_width = item.GetBitmap().GetWidth()
bmp_height = item.GetBitmap().GetHeight()
if self._agwFlags & AUI_TB_TEXT:
dc.SetFont(self._font)
label_size = GetLabelSize(dc, item.GetLabel(), not horizontal)
text_height = label_size.GetHeight()
text_width = label_size.GetWidth()
bmp_x = bmp_y = text_x = text_y = 0
if horizontal and text_bottom:
bmp_x = rect.x + (rect.width/2) - (bmp_width/2)
bmp_y = rect.y + 3
text_x = rect.x + (rect.width/2) - (text_width/2)
text_y = rect.y + ((bmp_y - rect.y) * 2) + bmp_height
elif horizontal and text_right:
bmp_x = rect.x + 3
bmp_y = rect.y + (rect.height/2) - (bmp_height / 2)
text_x = rect.x + ((bmp_x - rect.x) * 2) + bmp_width
text_y = rect.y + (rect.height/2) - (text_height/2)
elif not horizontal and text_bottom:
bmp_x = rect.x + (rect.width / 2) - (bmp_width / 2)
bmp_y = rect.y + 3
text_x = rect.x + (rect.width / 2) - (text_width / 2)
text_y = rect.y + ((bmp_y - rect.y) * 2) + bmp_height
bmp_rect = wx.Rect(bmp_x, bmp_y, bmp_width, bmp_height)
text_rect = wx.Rect(text_x, text_y, text_width, text_height)
return bmp_rect, text_rect
class AuiToolBar(wx.PyControl):
"""
AuiToolBar is a completely owner-drawn toolbar perfectly integrated with the
AUI layout system. This allows drag and drop of toolbars, docking/floating
behaviour and the possibility to define "overflow" items in the toolbar itself.
The default theme that is used is L{AuiDefaultToolBarArt}, which provides a modern,
glossy look and feel. The theme can be changed by calling L{AuiToolBar.SetArtProvider}.
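    A minimal usage sketch (assuming `frame` is an existing `wx.Frame` and `open_bmp`,
    `save_bmp` are valid `wx.Bitmap` objects)::

        tb = AuiToolBar(frame, -1, agwStyle=AUI_TB_DEFAULT_STYLE | AUI_TB_OVERFLOW)
        tb.AddSimpleTool(wx.ID_OPEN, "Open", open_bmp, "Open a file")
        tb.AddSeparator()
        tb.AddSimpleTool(wx.ID_SAVE, "Save", save_bmp, "Save the current file")
        tb.Realize()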
"""
def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=0, agwStyle=AUI_TB_DEFAULT_STYLE):
"""
Default class constructor.
:param `parent`: the L{AuiToolBar} parent;
:param `id`: an identifier for the control: a value of -1 is taken to mean a default;
:param `pos`: the control position. A value of (-1, -1) indicates a default position,
chosen by either the windowing system or wxPython, depending on platform;
:param `size`: the control size. A value of (-1, -1) indicates a default size,
chosen by either the windowing system or wxPython, depending on platform;
:param `style`: the control window style;
:param `agwStyle`: the AGW-specific window style. This can be a combination of the
following bits:
==================================== ==================================
Flag name Description
==================================== ==================================
``AUI_TB_TEXT`` Shows the text in the toolbar buttons; by default only icons are shown
``AUI_TB_NO_TOOLTIPS`` Don't show tooltips on `AuiToolBar` items
``AUI_TB_NO_AUTORESIZE`` Do not auto-resize the `AuiToolBar`
``AUI_TB_GRIPPER`` Shows a gripper on the `AuiToolBar`
``AUI_TB_OVERFLOW`` The `AuiToolBar` can contain overflow items
``AUI_TB_VERTICAL`` The `AuiToolBar` is vertical
``AUI_TB_HORZ_LAYOUT`` Shows the text and the icons alongside, not vertically stacked. This style must be used with ``AUI_TB_TEXT``.
``AUI_TB_PLAIN_BACKGROUND`` Don't draw a gradient background on the toolbar
``AUI_TB_HORZ_TEXT`` Combination of ``AUI_TB_HORZ_LAYOUT`` and ``AUI_TB_TEXT``
==================================== ==================================
The default value for `agwStyle` is: ``AUI_TB_DEFAULT_STYLE`` = 0
"""
wx.PyControl.__init__(self, parent, id, pos, size, style|wx.BORDER_NONE)
self._sizer = wx.BoxSizer(wx.HORIZONTAL)
self.SetSizer(self._sizer)
self._button_width = -1
self._button_height = -1
self._sizer_element_count = 0
self._action_pos = wx.Point(-1, -1)
self._action_item = None
self._tip_item = None
self._art = AuiDefaultToolBarArt()
self._tool_packing = 2
self._tool_border_padding = 3
self._tool_text_orientation = AUI_TBTOOL_TEXT_BOTTOM
self._tool_orientation = AUI_TBTOOL_HORIZONTAL
self._tool_alignment = wx.EXPAND
self._gripper_sizer_item = None
self._overflow_sizer_item = None
self._dragging = False
self._agwStyle = self._originalStyle = agwStyle
self._gripper_visible = (self._agwStyle & AUI_TB_GRIPPER and [True] or [False])[0]
self._overflow_visible = (self._agwStyle & AUI_TB_OVERFLOW and [True] or [False])[0]
self._overflow_state = 0
self._custom_overflow_prepend = []
self._custom_overflow_append = []
self._items = []
self.SetMargins(5, 5, 2, 2)
self.SetFont(wx.NORMAL_FONT)
self._art.SetAGWFlags(self._agwStyle)
self.SetExtraStyle(wx.WS_EX_PROCESS_IDLE)
if agwStyle & AUI_TB_HORZ_LAYOUT:
self.SetToolTextOrientation(AUI_TBTOOL_TEXT_RIGHT)
elif agwStyle & AUI_TB_VERTICAL:
if agwStyle & AUI_TB_CLOCKWISE:
self.SetToolOrientation(AUI_TBTOOL_VERT_CLOCKWISE)
elif agwStyle & AUI_TB_COUNTERCLOCKWISE:
self.SetToolOrientation(AUI_TBTOOL_VERT_COUNTERCLOCKWISE)
self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_IDLE, self.OnIdle)
self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
self.Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDown)
self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
self.Bind(wx.EVT_RIGHT_DOWN, self.OnRightDown)
self.Bind(wx.EVT_RIGHT_DCLICK, self.OnRightDown)
self.Bind(wx.EVT_RIGHT_UP, self.OnRightUp)
self.Bind(wx.EVT_MIDDLE_DOWN, self.OnMiddleDown)
self.Bind(wx.EVT_MIDDLE_DCLICK, self.OnMiddleDown)
self.Bind(wx.EVT_MIDDLE_UP, self.OnMiddleUp)
self.Bind(wx.EVT_MOTION, self.OnMotion)
self.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeaveWindow)
self.Bind(wx.EVT_SET_CURSOR, self.OnSetCursor)
def SetWindowStyleFlag(self, style):
"""
Sets the style of the window.
:param `style`: the new window style.
:note: Please note that some styles cannot be changed after the window
        creation and that `Refresh` might need to be called after changing the
others for the change to take place immediately.
:note: Overridden from `wx.PyControl`.
"""
wx.PyControl.SetWindowStyleFlag(self, style|wx.BORDER_NONE)
def SetAGWWindowStyleFlag(self, agwStyle):
"""
Sets the AGW-specific style of the window.
:param `agwStyle`: the new window style. This can be a combination of the
following bits:
==================================== ==================================
Flag name Description
==================================== ==================================
``AUI_TB_TEXT`` Shows the text in the toolbar buttons; by default only icons are shown
``AUI_TB_NO_TOOLTIPS`` Don't show tooltips on `AuiToolBar` items
``AUI_TB_NO_AUTORESIZE`` Do not auto-resize the `AuiToolBar`
``AUI_TB_GRIPPER`` Shows a gripper on the `AuiToolBar`
``AUI_TB_OVERFLOW`` The `AuiToolBar` can contain overflow items
``AUI_TB_VERTICAL`` The `AuiToolBar` is vertical
``AUI_TB_HORZ_LAYOUT`` Shows the text and the icons alongside, not vertically stacked. This style must be used with ``AUI_TB_TEXT``.
``AUI_TB_PLAIN_BACKGROUND`` Don't draw a gradient background on the toolbar
``AUI_TB_HORZ_TEXT`` Combination of ``AUI_TB_HORZ_LAYOUT`` and ``AUI_TB_TEXT``
==================================== ==================================
:note: Please note that some styles cannot be changed after the window
        creation and that `Refresh` might need to be called after changing the
others for the change to take place immediately.
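        A one-line sketch, assuming `tb` is an existing L{AuiToolBar}::

            tb.SetAGWWindowStyleFlag(AUI_TB_TEXT | AUI_TB_HORZ_LAYOUT | AUI_TB_OVERFLOW)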
"""
self._agwStyle = self._originalStyle = agwStyle
if self._art:
self._art.SetAGWFlags(self._agwStyle)
if agwStyle & AUI_TB_GRIPPER:
self._gripper_visible = True
else:
self._gripper_visible = False
if agwStyle & AUI_TB_OVERFLOW:
self._overflow_visible = True
else:
self._overflow_visible = False
if agwStyle & AUI_TB_HORZ_LAYOUT:
self.SetToolTextOrientation(AUI_TBTOOL_TEXT_RIGHT)
else:
self.SetToolTextOrientation(AUI_TBTOOL_TEXT_BOTTOM)
if agwStyle & AUI_TB_VERTICAL:
if agwStyle & AUI_TB_CLOCKWISE:
self.SetToolOrientation(AUI_TBTOOL_VERT_CLOCKWISE)
elif agwStyle & AUI_TB_COUNTERCLOCKWISE:
self.SetToolOrientation(AUI_TBTOOL_VERT_COUNTERCLOCKWISE)
def GetAGWWindowStyleFlag(self):
"""
Returns the AGW-specific window style flag.
        :see: L{SetAGWWindowStyleFlag} for an explanation of the various AGW-specific styles.
"""
return self._agwStyle
def SetArtProvider(self, art):
"""
        Instructs L{AuiToolBar} to use the art provider specified by the parameter `art`
        for all drawing calls. This allows pluggable look-and-feel features.
:param `art`: an art provider.
:note: The previous art provider object, if any, will be deleted by L{AuiToolBar}.
"""
del self._art
self._art = art
if self._art:
self._art.SetAGWFlags(self._agwStyle)
self._art.SetTextOrientation(self._tool_text_orientation)
self._art.SetOrientation(self._tool_orientation)
def GetArtProvider(self):
""" Returns the current art provider being used. """
return self._art
def AddSimpleTool(self, tool_id, label, bitmap, short_help_string="", kind=ITEM_NORMAL):
"""
Adds a tool to the toolbar. This is the simplest method you can use to
        add an item to the L{AuiToolBar}.
:param `tool_id`: an integer by which the tool may be identified in subsequent operations;
:param `label`: the toolbar tool label;
:param `bitmap`: the primary tool bitmap;
:param `short_help_string`: this string is used for the tools tooltip;
:param `kind`: the item kind. Can be one of the following:
======================== =============================
Item Kind Description
======================== =============================
``ITEM_CONTROL`` The item in the AuiToolBar is a control
``ITEM_LABEL`` The item in the AuiToolBar is a text label
``ITEM_SPACER`` The item in the AuiToolBar is a spacer
``ITEM_SEPARATOR`` The item in the AuiToolBar is a separator
``ITEM_CHECK`` The item in the AuiToolBar is a toolbar check item
``ITEM_NORMAL`` The item in the AuiToolBar is a standard toolbar item
``ITEM_RADIO`` The item in the AuiToolBar is a toolbar radio item
======================== =============================
"""
return self.AddTool(tool_id, label, bitmap, wx.NullBitmap, kind, short_help_string, "", None)
def AddToggleTool(self, tool_id, bitmap, disabled_bitmap, toggle=False, client_data=None, short_help_string="", long_help_string=""):
"""
Adds a toggle tool to the toolbar.
:param `tool_id`: an integer by which the tool may be identified in subsequent operations;
:param `bitmap`: the primary tool bitmap;
:param `disabled_bitmap`: the bitmap to use when the tool is disabled. If it is equal to
        `wx.NullBitmap`, the disabled bitmap is automatically generated by greying the normal one;
:param `client_data`: whatever Python object to associate with the toolbar item;
:param `short_help_string`: this string is used for the tools tooltip;
:param `long_help_string`: this string is shown in the statusbar (if any) of the parent
frame when the mouse pointer is inside the tool.
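        A minimal sketch, assuming `tb` is the toolbar, ``ID_BOLD`` is an application-defined
        identifier and `bold_bmp` a valid `wx.Bitmap`::

            tb.AddToggleTool(ID_BOLD, bold_bmp, wx.NullBitmap, toggle=True,
                             short_help_string="Toggle bold text")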
"""
kind = (toggle and [ITEM_CHECK] or [ITEM_NORMAL])[0]
return self.AddTool(tool_id, "", bitmap, disabled_bitmap, kind, short_help_string, long_help_string, client_data)
def AddTool(self, tool_id, label, bitmap, disabled_bitmap, kind, short_help_string, long_help_string, client_data):
"""
Adds a tool to the toolbar. This is the full feature version of L{AddTool}.
:param `tool_id`: an integer by which the tool may be identified in subsequent operations;
:param `label`: the toolbar tool label;
:param `bitmap`: the primary tool bitmap;
:param `disabled_bitmap`: the bitmap to use when the tool is disabled. If it is equal to
        `wx.NullBitmap`, the disabled bitmap is automatically generated by greying the normal one;
:param `kind`: the item kind. Can be one of the following:
======================== =============================
Item Kind Description
======================== =============================
``ITEM_CONTROL`` The item in the AuiToolBar is a control
``ITEM_LABEL`` The item in the AuiToolBar is a text label
``ITEM_SPACER`` The item in the AuiToolBar is a spacer
``ITEM_SEPARATOR`` The item in the AuiToolBar is a separator
``ITEM_CHECK`` The item in the AuiToolBar is a toolbar check item
``ITEM_NORMAL`` The item in the AuiToolBar is a standard toolbar item
``ITEM_RADIO`` The item in the AuiToolBar is a toolbar radio item
======================== =============================
:param `short_help_string`: this string is used for the tools tooltip;
:param `long_help_string`: this string is shown in the statusbar (if any) of the parent
frame when the mouse pointer is inside the tool.
:param `client_data`: whatever Python object to associate with the toolbar item.
"""
item = AuiToolBarItem()
item.window = None
item.label = label
item.bitmap = bitmap
item.disabled_bitmap = disabled_bitmap
item.short_help = short_help_string
item.long_help = long_help_string
item.active = True
item.dropdown = False
item.spacer_pixels = 0
if tool_id == wx.ID_ANY:
tool_id = wx.NewId()
item.id = tool_id
item.state = 0
item.proportion = 0
item.kind = kind
item.sizer_item = None
item.min_size = wx.Size(-1, -1)
item.user_data = 0
item.sticky = False
item.orientation = self._tool_orientation
if not item.disabled_bitmap.IsOk():
# no disabled bitmap specified, we need to make one
if item.bitmap.IsOk():
item.disabled_bitmap = MakeDisabledBitmap(item.bitmap)
self._items.append(item)
return self._items[-1]
def AddCheckTool(self, tool_id, label, bitmap, disabled_bitmap, short_help_string="", long_help_string="", client_data=None):
"""
Adds a new check (or toggle) tool to the L{AuiToolBar}.
:see: L{AddTool}.
"""
return self.AddTool(tool_id, label, bitmap, disabled_bitmap, ITEM_CHECK, short_help_string, long_help_string, client_data)
def AddRadioTool(self, tool_id, label, bitmap, disabled_bitmap, short_help_string="", long_help_string="", client_data=None):
"""
Adds a new radio tool to the toolbar.
Consecutive radio tools form a radio group such that exactly one button
        in the group is pressed at any moment; in other words, whenever a button
        in the group is pressed, the previously pressed button is automatically
        released. You should avoid having radio groups of only one element,
        as it would be impossible for the user to use such a button.
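        For example, a group of three mutually exclusive alignment tools might be added
        as follows (assuming `tb` is the toolbar, the ``ID_*`` identifiers are
        application-defined and the bitmaps are valid)::

            tb.AddRadioTool(ID_LEFT, "Left", left_bmp, wx.NullBitmap)
            tb.AddRadioTool(ID_CENTER, "Center", center_bmp, wx.NullBitmap)
            tb.AddRadioTool(ID_RIGHT, "Right", right_bmp, wx.NullBitmap)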
:note: By default, the first button in the radio group is initially pressed,
the others are not.
:see: L{AddTool}.
"""
return self.AddTool(tool_id, label, bitmap, disabled_bitmap, ITEM_RADIO, short_help_string, long_help_string, client_data)
def AddControl(self, control, label=""):
"""
        Adds any control to the toolbar, typically a combobox.
:param `control`: the control to be added;
:param `label`: the label which appears if the control goes into the
overflow items in the toolbar.
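        A minimal sketch, assuming `tb` is the toolbar::

            combo = wx.ComboBox(tb, -1, choices=["10%", "50%", "100%"], style=wx.CB_READONLY)
            tb.AddControl(combo, label="Zoom")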
"""
item = AuiToolBarItem()
item.window = control
item.label = label
item.bitmap = wx.NullBitmap
item.disabled_bitmap = wx.NullBitmap
item.active = True
item.dropdown = False
item.spacer_pixels = 0
item.id = control.GetId()
item.state = 0
item.proportion = 0
item.kind = ITEM_CONTROL
item.sizer_item = None
item.min_size = control.GetEffectiveMinSize()
item.user_data = 0
item.sticky = False
item.orientation = self._tool_orientation
self._items.append(item)
return self._items[-1]
def AddLabel(self, tool_id, label="", width=0):
"""
Adds a label tool to the L{AuiToolBar}.
:param `tool_id`: an integer by which the tool may be identified in subsequent operations;
:param `label`: the toolbar tool label;
:param `width`: the tool width.
"""
min_size = wx.Size(-1, -1)
if width != -1:
min_size.x = width
item = AuiToolBarItem()
item.window = None
item.label = label
item.bitmap = wx.NullBitmap
item.disabled_bitmap = wx.NullBitmap
item.active = True
item.dropdown = False
item.spacer_pixels = 0
if tool_id == wx.ID_ANY:
tool_id = wx.NewId()
item.id = tool_id
item.state = 0
item.proportion = 0
item.kind = ITEM_LABEL
item.sizer_item = None
item.min_size = min_size
item.user_data = 0
item.sticky = False
item.orientation = self._tool_orientation
self._items.append(item)
return self._items[-1]
def AddSeparator(self):
""" Adds a separator for spacing groups of tools. """
item = AuiToolBarItem()
item.window = None
item.label = ""
item.bitmap = wx.NullBitmap
item.disabled_bitmap = wx.NullBitmap
item.active = True
item.dropdown = False
item.id = -1
item.state = 0
item.proportion = 0
item.kind = ITEM_SEPARATOR
item.sizer_item = None
item.min_size = wx.Size(-1, -1)
item.user_data = 0
item.sticky = False
item.orientation = self._tool_orientation
self._items.append(item)
return self._items[-1]
def AddSpacer(self, pixels):
"""
Adds a spacer for spacing groups of tools.
:param `pixels`: the width of the spacer.
"""
item = AuiToolBarItem()
item.window = None
item.label = ""
item.bitmap = wx.NullBitmap
item.disabled_bitmap = wx.NullBitmap
item.active = True
item.dropdown = False
item.spacer_pixels = pixels
item.id = -1
item.state = 0
item.proportion = 0
item.kind = ITEM_SPACER
item.sizer_item = None
item.min_size = wx.Size(-1, -1)
item.user_data = 0
item.sticky = False
item.orientation = self._tool_orientation
self._items.append(item)
return self._items[-1]
def AddStretchSpacer(self, proportion=1):
"""
Adds a stretchable spacer for spacing groups of tools.
:param `proportion`: the stretchable spacer proportion.
"""
item = AuiToolBarItem()
item.window = None
item.label = ""
item.bitmap = wx.NullBitmap
item.disabled_bitmap = wx.NullBitmap
item.active = True
item.dropdown = False
item.spacer_pixels = 0
item.id = -1
item.state = 0
item.proportion = proportion
item.kind = ITEM_SPACER
item.sizer_item = None
item.min_size = wx.Size(-1, -1)
item.user_data = 0
item.sticky = False
item.orientation = self._tool_orientation
self._items.append(item)
return self._items[-1]
def Clear(self):
""" Deletes all the tools in the L{AuiToolBar}. """
self._items = []
self._sizer_element_count = 0
def ClearTools(self):
""" Deletes all the tools in the L{AuiToolBar}. """
self.Clear()
def DeleteTool(self, tool_id):
"""
Removes the specified tool from the toolbar and deletes it.
:param `tool_id`: the L{AuiToolBarItem} identifier.
:returns: ``True`` if the tool was deleted, ``False`` otherwise.
:note: Note that it is unnecessary to call L{Realize} for the change to
take place, it will happen immediately.
"""
idx = self.GetToolIndex(tool_id)
if idx >= 0 and idx < len(self._items):
self._items.pop(idx)
self.Realize()
return True
return False
def DeleteToolByPos(self, pos):
"""
This function behaves like L{DeleteTool} but it deletes the tool at the
specified position and not the one with the given id.
:param `pos`: the tool position.
:see: L{DeleteTool}
"""
if pos >= 0 and pos < len(self._items):
self._items.pop(pos)
self.Realize()
return True
return False
def FindControl(self, id):
"""
Returns a pointer to the control identified by `id` or ``None`` if no corresponding
control is found.
:param `id`: the control identifier.
"""
wnd = self.FindWindow(id)
return wnd
def FindTool(self, tool_id):
"""
Finds a tool for the given tool id.
:param `tool_id`: the L{AuiToolBarItem} identifier.
"""
for item in self._items:
if item.id == tool_id:
return item
return None
def FindToolForPosition(self, x, y):
"""
Finds a tool for the given mouse position.
:param `x`: mouse `x` position;
:param `y`: mouse `y` position.
:returns: a pointer to a L{AuiToolBarItem} if a tool is found, or ``None`` otherwise.
"""
for i, item in enumerate(self._items):
if not item.sizer_item:
continue
rect = item.sizer_item.GetRect()
if rect.Contains((x,y)):
# if the item doesn't fit on the toolbar, return None
if not self.GetToolFitsByIndex(i):
return None
return item
return None
def FindToolForPositionWithPacking(self, x, y):
"""
Finds a tool for the given mouse position, taking into account also the
tool packing.
:param `x`: mouse `x` position;
:param `y`: mouse `y` position.
:returns: a pointer to a L{AuiToolBarItem} if a tool is found, or ``None`` otherwise.
"""
count = len(self._items)
for i, item in enumerate(self._items):
if not item.sizer_item:
continue
rect = item.sizer_item.GetRect()
# apply tool packing
if i+1 < count:
rect.width += self._tool_packing
if rect.Contains((x,y)):
# if the item doesn't fit on the toolbar, return None
if not self.GetToolFitsByIndex(i):
return None
return item
return None
def FindToolByIndex(self, pos):
"""
Finds a tool for the given tool position in the L{AuiToolBar}.
:param `pos`: the tool position in the toolbar.
:returns: a pointer to a L{AuiToolBarItem} if a tool is found, or ``None`` otherwise.
"""
if pos < 0 or pos >= len(self._items):
return None
return self._items[pos]
def SetToolBitmapSize(self, size):
"""
Sets the default size of each tool bitmap. The default bitmap size is
16 by 15 pixels.
:param `size`: the size of the bitmaps in the toolbar.
:note: This should be called to tell the toolbar what the tool bitmap
size is. Call it before you add tools.
:note: Note that this is the size of the bitmap you pass to L{AddTool},
and not the eventual size of the tool button.
:todo: Add `wx.ToolBar` compatibility, actually implementing this method.
"""
# TODO: wx.ToolBar compatibility
pass
def GetToolBitmapSize(self):
"""
Returns the size of bitmap that the toolbar expects to have. The default
bitmap size is 16 by 15 pixels.
:note: Note that this is the size of the bitmap you pass to L{AddTool},
and not the eventual size of the tool button.
:todo: Add `wx.ToolBar` compatibility, actually implementing this method.
"""
# TODO: wx.ToolBar compatibility
return wx.Size(16, 15)
def SetToolProportion(self, tool_id, proportion):
"""
Sets the tool proportion in the toolbar.
:param `tool_id`: the L{AuiToolBarItem} identifier;
:param `proportion`: the tool proportion in the toolbar.
"""
item = self.FindTool(tool_id)
if not item:
return
item.proportion = proportion
def GetToolProportion(self, tool_id):
"""
Returns the tool proportion in the toolbar.
:param `tool_id`: the L{AuiToolBarItem} identifier.
"""
item = self.FindTool(tool_id)
if not item:
return
return item.proportion
def SetToolSeparation(self, separation):
"""
Sets the separator size for the toolbar.
:param `separation`: the separator size in pixels.
"""
if self._art:
self._art.SetElementSize(AUI_TBART_SEPARATOR_SIZE, separation)
def GetToolSeparation(self):
""" Returns the separator size for the toolbar, in pixels. """
if self._art:
return self._art.GetElementSize(AUI_TBART_SEPARATOR_SIZE)
return 5
def SetToolDropDown(self, tool_id, dropdown):
"""
Assigns a drop down window menu to the toolbar item.
:param `tool_id`: the L{AuiToolBarItem} identifier;
:param `dropdown`: whether to assign a drop down menu or not.
"""
item = self.FindTool(tool_id)
if not item:
return
item.dropdown = dropdown
def GetToolDropDown(self, tool_id):
"""
Returns whether the toolbar item identified by `tool_id` has an associated
drop down window menu or not.
:param `tool_id`: the L{AuiToolBarItem} identifier.
"""
item = self.FindTool(tool_id)
if not item:
return
return item.dropdown
def SetToolSticky(self, tool_id, sticky):
"""
Sets the toolbar item as sticky or non-sticky.
:param `tool_id`: the L{AuiToolBarItem} identifier;
:param `sticky`: whether the tool should be sticky or not.
"""
# ignore separators
if tool_id == -1:
return
item = self.FindTool(tool_id)
if not item:
return
if item.sticky == sticky:
return
item.sticky = sticky
self.Refresh(False)
self.Update()
def GetToolSticky(self, tool_id):
"""
Returns whether the toolbar item identified by `tool_id` has a sticky
behaviour or not.
:param `tool_id`: the L{AuiToolBarItem} identifier.
"""
item = self.FindTool(tool_id)
if not item:
return
return item.sticky
def SetToolBorderPadding(self, padding):
"""
Sets the padding between the tool border and the label.
:param `padding`: the padding in pixels.
"""
self._tool_border_padding = padding
def GetToolBorderPadding(self):
""" Returns the padding between the tool border and the label, in pixels. """
return self._tool_border_padding
def SetToolTextOrientation(self, orientation):
"""
Sets the label orientation for the toolbar items.
:param `orientation`: the L{AuiToolBarItem} label orientation.
"""
self._tool_text_orientation = orientation
if self._art:
self._art.SetTextOrientation(orientation)
def GetToolTextOrientation(self):
""" Returns the label orientation for the toolbar items. """
return self._tool_text_orientation
def SetToolOrientation(self, orientation):
"""
Sets the tool orientation for the toolbar items.
:param `orientation`: the L{AuiToolBarItem} orientation.
"""
self._tool_orientation = orientation
if self._art:
self._art.SetOrientation(orientation)
def GetToolOrientation(self):
""" Returns the orientation for the toolbar items. """
return self._tool_orientation
def SetToolPacking(self, packing):
"""
Sets the value used for spacing tools. The default value is 1.
:param `packing`: the value for packing.
"""
self._tool_packing = packing
def GetToolPacking(self):
""" Returns the value used for spacing tools. The default value is 1. """
return self._tool_packing
def SetOrientation(self, orientation):
"""
Sets the toolbar orientation.
:param `orientation`: either ``wx.VERTICAL`` or ``wx.HORIZONTAL``.
:note: This can be temporarily overridden by L{AuiManager} when floating and
docking a L{AuiToolBar}.
"""
pass
def SetMargins(self, left=-1, right=-1, top=-1, bottom=-1):
"""
Set the values to be used as margins for the toolbar.
:param `left`: the left toolbar margin;
:param `right`: the right toolbar margin;
:param `top`: the top toolbar margin;
:param `bottom`: the bottom toolbar margin.
"""
if left != -1:
self._left_padding = left
if right != -1:
self._right_padding = right
if top != -1:
self._top_padding = top
if bottom != -1:
self._bottom_padding = bottom
def SetMarginsSize(self, size):
"""
Set the values to be used as margins for the toolbar.
:param `size`: the margin size (an instance of `wx.Size`).
"""
self.SetMargins(size.x, size.x, size.y, size.y)
def SetMarginsXY(self, x, y):
"""
Set the values to be used as margins for the toolbar.
:param `x`: left margin, right margin and inter-tool separation value;
:param `y`: top margin, bottom margin and inter-tool separation value.
"""
self.SetMargins(x, x, y, y)
def GetGripperVisible(self):
""" Returns whether the toolbar gripper is visible or not. """
return self._gripper_visible
def SetGripperVisible(self, visible):
"""
Sets whether the toolbar gripper is visible or not.
:param `visible`: ``True`` for a visible gripper, ``False`` otherwise.
"""
self._gripper_visible = visible
if visible:
self._agwStyle |= AUI_TB_GRIPPER
else:
self._agwStyle &= ~AUI_TB_GRIPPER
self.Realize()
self.Refresh(False)
def GetOverflowVisible(self):
""" Returns whether the overflow button is visible or not. """
return self._overflow_visible
def SetOverflowVisible(self, visible):
"""
Sets whether the overflow button is visible or not.
:param `visible`: ``True`` for a visible overflow button, ``False`` otherwise.
"""
self._overflow_visible = visible
if visible:
self._agwStyle |= AUI_TB_OVERFLOW
else:
self._agwStyle &= ~AUI_TB_OVERFLOW
self.Refresh(False)
def SetFont(self, font):
"""
Sets the L{AuiToolBar} font.
:param `font`: a `wx.Font` object.
:note: Overridden from `wx.PyControl`.
"""
res = wx.PyControl.SetFont(self, font)
if self._art:
self._art.SetFont(font)
return res
def SetHoverItem(self, pitem):
"""
Sets a toolbar item to be currently hovered by the mouse.
:param `pitem`: an instance of L{AuiToolBarItem}.
"""
former_hover = None
for item in self._items:
if item.state & AUI_BUTTON_STATE_HOVER:
former_hover = item
item.state &= ~AUI_BUTTON_STATE_HOVER
if pitem:
pitem.state |= AUI_BUTTON_STATE_HOVER
if former_hover != pitem:
self.Refresh(False)
self.Update()
def SetPressedItem(self, pitem):
"""
Sets a toolbar item to be currently in a "pressed" state.
:param `pitem`: an instance of L{AuiToolBarItem}.
"""
former_item = None
for item in self._items:
if item.state & AUI_BUTTON_STATE_PRESSED:
former_item = item
item.state &= ~AUI_BUTTON_STATE_PRESSED
if pitem:
pitem.state &= ~AUI_BUTTON_STATE_HOVER
pitem.state |= AUI_BUTTON_STATE_PRESSED
if former_item != pitem:
self.Refresh(False)
self.Update()
def RefreshOverflowState(self):
""" Refreshes the overflow button. """
if not self._overflow_sizer_item:
self._overflow_state = 0
return
overflow_state = 0
overflow_rect = self.GetOverflowRect()
# find out the mouse's current position
pt = wx.GetMousePosition()
pt = self.ScreenToClient(pt)
# find out if the mouse cursor is inside the dropdown rectangle
if overflow_rect.Contains((pt.x, pt.y)):
if wx.GetMouseState().LeftDown():
overflow_state = AUI_BUTTON_STATE_PRESSED
else:
overflow_state = AUI_BUTTON_STATE_HOVER
if overflow_state != self._overflow_state:
self._overflow_state = overflow_state
self.Refresh(False)
self.Update()
self._overflow_state = overflow_state
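# Note: RefreshOverflowState polls the global pointer state (wx.GetMousePosition
# and wx.GetMouseState) instead of relying on a mouse event, so it can be called
# from any handler -- e.g. OnMotion below -- to decide whether the overflow button
# should draw as normal, hovered or pressed, repainting only when that state
# actually changes.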
def ToggleTool(self, tool_id, state):
"""
Toggles a tool on or off. This does not cause any event to get emitted.
:param `tool_id`: tool in question.
:param `state`: if ``True``, toggles the tool on, otherwise toggles it off.
:note: This only applies to a tool that has been specified as a toggle tool.
"""
tool = self.FindTool(tool_id)
if tool:
if tool.kind not in [ITEM_CHECK, ITEM_RADIO]:
return
if tool.kind == ITEM_RADIO:
idx = self.GetToolIndex(tool_id)
if idx >= 0 and idx < len(self._items):
for i in xrange(idx, len(self._items)):
tool = self.FindToolByIndex(i)
if tool.kind != ITEM_RADIO:
break
tool.state &= ~AUI_BUTTON_STATE_CHECKED
for i in xrange(idx, -1, -1):
tool = self.FindToolByIndex(i)
if tool.kind != ITEM_RADIO:
break
tool.state &= ~AUI_BUTTON_STATE_CHECKED
tool = self.FindTool(tool_id)
tool.state |= AUI_BUTTON_STATE_CHECKED
else:
if state == True:
tool.state |= AUI_BUTTON_STATE_CHECKED
else:
tool.state &= ~AUI_BUTTON_STATE_CHECKED
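# For ITEM_RADIO tools, ToggleTool above walks forward and backward from the
# tool's index and clears AUI_BUTTON_STATE_CHECKED on every contiguous
# ITEM_RADIO neighbour before checking the requested tool, so a run of adjacent
# radio tools behaves as one mutually exclusive group.  For ITEM_CHECK tools the
# flag simply follows `state`.  A small sketch (ID_BOLD is a hypothetical id of
# a check tool already added to the bar):
#
#   tb.ToggleTool(ID_BOLD, True)
#   assert tb.GetToolToggled(ID_BOLD)    # reads AUI_BUTTON_STATE_CHECKED back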
def GetToolToggled(self, tool_id):
"""
Returns whether a tool is toggled or not.
:param `tool_id`: the toolbar item identifier.
:note: This only applies to a tool that has been specified as a toggle tool.
"""
tool = self.FindTool(tool_id)
if tool:
if tool.kind not in [ITEM_CHECK, ITEM_RADIO]:
return False
return (tool.state & AUI_BUTTON_STATE_CHECKED and [True] or [False])[0]
return False
def EnableTool(self, tool_id, state):
"""
Enables or disables the tool.
:param `tool_id`: identifier for the tool to enable or disable.
:param `state`: if ``True``, enables the tool, otherwise disables it.
"""
tool = self.FindTool(tool_id)
if tool:
if state == True:
tool.state &= ~AUI_BUTTON_STATE_DISABLED
else:
tool.state |= AUI_BUTTON_STATE_DISABLED
def GetToolEnabled(self, tool_id):
"""
Returns whether the tool identified by `tool_id` is enabled or not.
:param `tool_id`: the tool identifier.
"""
tool = self.FindTool(tool_id)
if tool:
return (tool.state & AUI_BUTTON_STATE_DISABLED and [False] or [True])[0]
return False
def GetToolLabel(self, tool_id):
"""
Returns the tool label for the tool identified by `tool_id`.
:param `tool_id`: the tool identifier.
"""
tool = self.FindTool(tool_id)
if not tool:
return ""
return tool.label
def SetToolLabel(self, tool_id, label):
"""
Sets the tool label for the tool identified by `tool_id`.
:param `tool_id`: the tool identifier;
:param `label`: the new toolbar item label.
"""
tool = self.FindTool(tool_id)
if tool:
tool.label = label
def GetToolBitmap(self, tool_id):
"""
Returns the tool bitmap for the tool identified by `tool_id`.
:param `tool_id`: the tool identifier.
"""
tool = self.FindTool(tool_id)
if not tool:
return wx.NullBitmap
return tool.bitmap
def SetToolBitmap(self, tool_id, bitmap):
"""
Sets the tool bitmap for the tool identified by `tool_id`.
:param `tool_id`: the tool identifier;
:param `bitmap`: the new bitmap for the toolbar item.
"""
tool = self.FindTool(tool_id)
if tool:
tool.bitmap = bitmap
def SetToolNormalBitmap(self, tool_id, bitmap):
"""
Sets the tool bitmap for the tool identified by `tool_id`.
:param `tool_id`: the tool identifier;
:param `bitmap`: the new bitmap for the toolbar item.
"""
self.SetToolBitmap(tool_id, bitmap)
def SetToolDisabledBitmap(self, tool_id, bitmap):
"""
Sets the tool disabled bitmap for the tool identified by `tool_id`.
:param `tool_id`: the tool identifier;
:param `bitmap`: the new disabled bitmap for the toolbar item.
"""
tool = self.FindTool(tool_id)
if tool:
tool.disabled_bitmap = bitmap
def GetToolShortHelp(self, tool_id):
"""
Returns the short help for the given tool.
:param `tool_id`: the tool identifier.
"""
tool = self.FindTool(tool_id)
if not tool:
return ""
return tool.short_help
def SetToolShortHelp(self, tool_id, help_string):
"""
Sets the short help for the given tool.
:param `tool_id`: the tool identifier;
:param `help_string`: the string for the short help.
"""
tool = self.FindTool(tool_id)
if tool:
tool.short_help = help_string
def GetToolLongHelp(self, tool_id):
"""
Returns the long help for the given tool.
:param `tool_id`: the tool identifier.
"""
tool = self.FindTool(tool_id)
if not tool:
return ""
return tool.long_help
def SetToolAlignment(self, alignment=wx.EXPAND):
"""
This sets the alignment for all of the tools within the
toolbar (only has an effect when the toolbar is expanded).
:param `alignment`: `wx.Sizer` alignment value
(``wx.ALIGN_CENTER_HORIZONTAL`` or ``wx.ALIGN_CENTER_VERTICAL``).
"""
self._tool_alignment = alignment
def SetToolLongHelp(self, tool_id, help_string):
"""
Sets the long help for the given tool.
:param `tool_id`: the tool identifier;
:param `help_string`: the string for the long help.
"""
tool = self.FindTool(tool_id)
if tool:
tool.long_help = help_string
def SetCustomOverflowItems(self, prepend, append):
"""
Sets the two lists `prepend` and `append` as custom overflow items.
:param `prepend`: a list of L{AuiToolBarItem} to be prepended;
:param `append`: a list of L{AuiToolBarItem} to be appended.
"""
self._custom_overflow_prepend = prepend
self._custom_overflow_append = append
def GetToolCount(self):
""" Returns the number of tools in the L{AuiToolBar}. """
return len(self._items)
def GetToolIndex(self, tool_id):
"""
Returns the position of the tool in the toolbar given its identifier.
:param `tool_id`: the toolbar item identifier.
"""
# this will prevent us from returning the index of the
# first separator in the toolbar since its id is equal to -1
if tool_id == -1:
return wx.NOT_FOUND
for i, item in enumerate(self._items):
if item.id == tool_id:
return i
return wx.NOT_FOUND
def GetToolPos(self, tool_id):
"""
Returns the position of the tool in the toolbar given its identifier.
:param `tool_id`: the toolbar item identifier.
"""
return self.GetToolIndex(tool_id)
def GetToolFitsByIndex(self, tool_idx):
"""
Returns whether the tool identified by `tool_idx` fits into the toolbar or not.
:param `tool_idx`: the toolbar item identifier.
"""
if tool_idx < 0 or tool_idx >= len(self._items):
return False
if not self._items[tool_idx].sizer_item:
return False
cli_w, cli_h = self.GetClientSize()
rect = self._items[tool_idx].sizer_item.GetRect()
if self._agwStyle & AUI_TB_VERTICAL:
# take the dropdown size into account
if self._overflow_visible:
cli_h -= self._overflow_sizer_item.GetSize().y
if rect.y+rect.height < cli_h:
return True
else:
# take the dropdown size into account
if self._overflow_visible:
cli_w -= self._overflow_sizer_item.GetSize().x
if rect.x+rect.width < cli_w:
return True
return False
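# In other words, a tool "fits" when the far edge of its sizer rectangle lies
# inside the client area, with the client extent reduced by the overflow
# button's size whenever the overflow button is visible.  Tools failing this
# test are the ones offered in the overflow drop-down (see OnLeftDown below).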
def GetToolFits(self, tool_id):
"""
Returns whether the tool identified by `tool_id` fits into the toolbar or not.
:param `tool_id`: the toolbar item identifier.
"""
return self.GetToolFitsByIndex(self.GetToolIndex(tool_id))
def GetToolRect(self, tool_id):
"""
Returns the toolbar item rectangle.
:param `tool_id`: the toolbar item identifier.
"""
tool = self.FindTool(tool_id)
if tool and tool.sizer_item:
return tool.sizer_item.GetRect()
return wx.Rect()
def GetToolBarFits(self):
""" Returns whether the L{AuiToolBar} size fits in a specified size. """
if len(self._items) == 0:
# empty toolbar always 'fits'
return True
# entire toolbar content fits if the last tool fits
return self.GetToolFitsByIndex(len(self._items) - 1)
def Realize(self):
""" Realizes the toolbar. This function should be called after you have added tools. """
dc = wx.ClientDC(self)
if not dc.IsOk():
return False
horizontal = True
if self._agwStyle & AUI_TB_VERTICAL:
horizontal = False
# create the new sizer to add toolbar elements to
sizer = wx.BoxSizer((horizontal and [wx.HORIZONTAL] or [wx.VERTICAL])[0])
# add gripper area
separator_size = self._art.GetElementSize(AUI_TBART_SEPARATOR_SIZE)
gripper_size = self._art.GetElementSize(AUI_TBART_GRIPPER_SIZE)
if gripper_size > 0 and self._gripper_visible:
if horizontal:
self._gripper_sizer_item = sizer.Add((gripper_size, 1), 0, wx.EXPAND)
else:
self._gripper_sizer_item = sizer.Add((1, gripper_size), 0, wx.EXPAND)
else:
self._gripper_sizer_item = None
# add "left" padding
if self._left_padding > 0:
if horizontal:
sizer.Add((self._left_padding, 1))
else:
sizer.Add((1, self._left_padding))
count = len(self._items)
for i, item in enumerate(self._items):
sizer_item = None
kind = item.kind
if kind == ITEM_LABEL:
size = self._art.GetLabelSize(dc, self, item)
sizer_item = sizer.Add((size.x + (self._tool_border_padding*2),
size.y + (self._tool_border_padding*2)),
item.proportion,
item.alignment)
if i+1 < count:
sizer.AddSpacer(self._tool_packing)
elif kind in [ITEM_CHECK, ITEM_NORMAL, ITEM_RADIO]:
size = self._art.GetToolSize(dc, self, item)
sizer_item = sizer.Add((size.x + (self._tool_border_padding*2),
size.y + (self._tool_border_padding*2)),
0,
item.alignment)
# add tool packing
if i+1 < count:
sizer.AddSpacer(self._tool_packing)
elif kind == ITEM_SEPARATOR:
if horizontal:
sizer_item = sizer.Add((separator_size, 1), 0, wx.EXPAND)
else:
sizer_item = sizer.Add((1, separator_size), 0, wx.EXPAND)
# add tool packing
if i+1 < count:
sizer.AddSpacer(self._tool_packing)
elif kind == ITEM_SPACER:
if item.proportion > 0:
sizer_item = sizer.AddStretchSpacer(item.proportion)
else:
sizer_item = sizer.Add((item.spacer_pixels, 1))
elif kind == ITEM_CONTROL:
vert_sizer = wx.BoxSizer(wx.VERTICAL)
vert_sizer.AddStretchSpacer(1)
ctrl_sizer_item = vert_sizer.Add(item.window, 0, wx.EXPAND)
vert_sizer.AddStretchSpacer(1)
if self._agwStyle & AUI_TB_TEXT and \
self._tool_text_orientation == AUI_TBTOOL_TEXT_BOTTOM and \
item.GetLabel() != "":
s = self.GetLabelSize(item.GetLabel())
vert_sizer.Add((1, s.y))
sizer_item = sizer.Add(vert_sizer, item.proportion, wx.EXPAND)
min_size = item.min_size
# proportional items will disappear from the toolbar if
# their min width is not set to something really small
if item.proportion != 0:
min_size.x = 1
if min_size.IsFullySpecified():
sizer.SetItemMinSize(vert_sizer, min_size)
vert_sizer.SetItemMinSize(item.window, min_size)
# add tool packing
if i+1 < count:
sizer.AddSpacer(self._tool_packing)
item.sizer_item = sizer_item
# add "right" padding
if self._right_padding > 0:
if horizontal:
sizer.Add((self._right_padding, 1))
else:
sizer.Add((1, self._right_padding))
# add drop down area
self._overflow_sizer_item = None
if self._agwStyle & AUI_TB_OVERFLOW:
overflow_size = self._art.GetElementSize(AUI_TBART_OVERFLOW_SIZE)
if overflow_size > 0 and self._overflow_visible:
if horizontal:
self._overflow_sizer_item = sizer.Add((overflow_size, 1), 0, wx.EXPAND)
else:
self._overflow_sizer_item = sizer.Add((1, overflow_size), 0, wx.EXPAND)
else:
self._overflow_sizer_item = None
# the outside sizer helps us apply the "top" and "bottom" padding
outside_sizer = wx.BoxSizer((horizontal and [wx.VERTICAL] or [wx.HORIZONTAL])[0])
# add "top" padding
if self._top_padding > 0:
if horizontal:
outside_sizer.Add((1, self._top_padding))
else:
outside_sizer.Add((self._top_padding, 1))
# add the sizer that contains all of the toolbar elements
outside_sizer.Add(sizer, 1, self._tool_alignment)
# add "bottom" padding
if self._bottom_padding > 0:
if horizontal:
outside_sizer.Add((1, self._bottom_padding))
else:
outside_sizer.Add((self._bottom_padding, 1))
del self._sizer # remove old sizer
self._sizer = outside_sizer
self.SetSizer(outside_sizer)
# calculate the rock-bottom minimum size
for item in self._items:
if item.sizer_item and item.proportion > 0 and item.min_size.IsFullySpecified():
item.sizer_item.SetMinSize((0, 0))
self._absolute_min_size = self._sizer.GetMinSize()
# reset the min sizes to what they were
for item in self._items:
if item.sizer_item and item.proportion > 0 and item.min_size.IsFullySpecified():
item.sizer_item.SetMinSize(item.min_size)
# set control size
size = self._sizer.GetMinSize()
self.SetMinSize(size)
self._minWidth = size.x
self._minHeight = size.y
if self._agwStyle & AUI_TB_NO_AUTORESIZE == 0:
cur_size = self.GetClientSize()
new_size = self.GetMinSize()
if new_size != cur_size:
self.SetClientSize(new_size)
else:
self._sizer.SetDimension(0, 0, cur_size.x, cur_size.y)
else:
cur_size = self.GetClientSize()
self._sizer.SetDimension(0, 0, cur_size.x, cur_size.y)
self.Refresh(False)
return True
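# Layout produced by Realize above, summarised:
#
#   inner sizer (follows the toolbar orientation):
#     [gripper][left pad][item][packing]...[item][right pad][overflow button]
#   outer sizer (perpendicular): [top pad][inner sizer][bottom pad]
#
# Minimum sizes are computed twice: once with proportional items collapsed to
# (0, 0) to obtain self._absolute_min_size (the smallest the bar can ever be),
# then again with the real minimums restored to size the control itself.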
def GetOverflowState(self):
""" Returns the state of the overflow button. """
return self._overflow_state
def GetOverflowRect(self):
""" Returns the rectangle of the overflow button. """
cli_rect = wx.RectPS(wx.Point(0, 0), self.GetClientSize())
overflow_rect = wx.Rect(*self._overflow_sizer_item.GetRect())
overflow_size = self._art.GetElementSize(AUI_TBART_OVERFLOW_SIZE)
if self._agwStyle & AUI_TB_VERTICAL:
overflow_rect.y = cli_rect.height - overflow_size
overflow_rect.x = 0
overflow_rect.width = cli_rect.width
overflow_rect.height = overflow_size
else:
overflow_rect.x = cli_rect.width - overflow_size
overflow_rect.y = 0
overflow_rect.width = overflow_size
overflow_rect.height = cli_rect.height
return overflow_rect
def GetLabelSize(self, label):
"""
Returns the standard size of a toolbar item.
:param `label`: a test label.
"""
dc = wx.ClientDC(self)
dc.SetFont(self._font)
return GetLabelSize(dc, label, self._tool_orientation != AUI_TBTOOL_HORIZONTAL)
def GetAuiManager(self):
""" Returns the L{AuiManager} which manages the toolbar. """
try:
return self._auiManager
except AttributeError:
return False
def SetAuiManager(self, auiManager):
""" Sets the L{AuiManager} which manages the toolbar. """
self._auiManager = auiManager
def DoIdleUpdate(self):
""" Updates the toolbar during idle times. """
handler = self.GetEventHandler()
if not handler:
return
need_refresh = False
for item in self._items:
if item.id == -1:
continue
evt = wx.UpdateUIEvent(item.id)
evt.SetEventObject(self)
if handler.ProcessEvent(evt):
if evt.GetSetEnabled():
if item.window:
is_enabled = item.window.IsEnabled()
else:
is_enabled = (item.state & AUI_BUTTON_STATE_DISABLED and [False] or [True])[0]
new_enabled = evt.GetEnabled()
if new_enabled != is_enabled:
if item.window:
item.window.Enable(new_enabled)
else:
if new_enabled:
item.state &= ~AUI_BUTTON_STATE_DISABLED
else:
item.state |= AUI_BUTTON_STATE_DISABLED
need_refresh = True
if evt.GetSetChecked():
# make sure we aren't checking an item that can't be checked
if item.kind != ITEM_CHECK and item.kind != ITEM_RADIO:
continue
is_checked = (item.state & AUI_BUTTON_STATE_CHECKED and [True] or [False])[0]
new_checked = evt.GetChecked()
if new_checked != is_checked:
if new_checked:
item.state |= AUI_BUTTON_STATE_CHECKED
else:
item.state &= ~AUI_BUTTON_STATE_CHECKED
need_refresh = True
if need_refresh:
self.Refresh(False)
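# DoIdleUpdate implements the standard wxWidgets UpdateUI pattern: for every
# tool with a real id it sends a wx.UpdateUIEvent through the event handler, so
# an application can keep tool state centralised in one place.  A sketch
# (ID_SAVE and have_document are hypothetical names):
#
#   frame.Bind(wx.EVT_UPDATE_UI,
#              lambda evt: evt.Enable(have_document), id=ID_SAVE)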
def OnSize(self, event):
"""
Handles the ``wx.EVT_SIZE`` event for L{AuiToolBar}.
:param `event`: a `wx.SizeEvent` event to be processed.
"""
x, y = self.GetClientSize()
realize = False
if x > y:
self.SetOrientation(wx.HORIZONTAL)
else:
self.SetOrientation(wx.VERTICAL)
if (x >= y and self._absolute_min_size.x > x) or (y > x and self._absolute_min_size.y > y):
# hide all flexible items
for item in self._items:
if item.sizer_item and item.proportion > 0 and item.sizer_item.IsShown():
item.sizer_item.Show(False)
item.sizer_item.SetProportion(0)
if self._originalStyle & AUI_TB_OVERFLOW:
if not self.GetOverflowVisible():
self.SetOverflowVisible(True)
realize = True
else:
if self._originalStyle & AUI_TB_OVERFLOW and not self._custom_overflow_append and \
not self._custom_overflow_prepend:
if self.GetOverflowVisible():
self.SetOverflowVisible(False)
realize = True
# show all flexible items
for item in self._items:
if item.sizer_item and item.proportion > 0 and not item.sizer_item.IsShown():
item.sizer_item.Show(True)
item.sizer_item.SetProportion(item.proportion)
self._sizer.SetDimension(0, 0, x, y)
if realize:
self.Realize()
else:
self.Refresh(False)
self.Update()
def DoSetSize(self, x, y, width, height, sizeFlags=wx.SIZE_AUTO):
"""
Sets the position and size of the window in pixels. The `sizeFlags`
parameter indicates the interpretation of the other params if they are
equal to -1.
:param `x`: the window `x` position;
:param `y`: the window `y` position;
:param `width`: the window width;
:param `height`: the window height;
:param `sizeFlags`: may have one of this bit set:
=================================== ======================================
Size Flags Description
=================================== ======================================
``wx.SIZE_AUTO`` A -1 indicates that a class-specific default should be used.
``wx.SIZE_AUTO_WIDTH`` A -1 indicates that a class-specific default should be used for the width.
``wx.SIZE_AUTO_HEIGHT`` A -1 indicates that a class-specific default should be used for the height.
``wx.SIZE_USE_EXISTING`` Existing dimensions should be used if -1 values are supplied.
``wx.SIZE_ALLOW_MINUS_ONE`` Allow dimensions of -1 and less to be interpreted as real dimensions, not default values.
``wx.SIZE_FORCE`` Normally, if the position and the size of the window are already the same as the parameters of this function, nothing is done; but with this flag a window resize may be forced even in this case (supported in wx 2.6.2 and later, only implemented for MSW and ignored elsewhere currently)
=================================== ======================================
:note: Overridden from `wx.PyControl`.
"""
parent_size = self.GetParent().GetClientSize()
if x + width > parent_size.x:
width = max(0, parent_size.x - x)
if y + height > parent_size.y:
height = max(0, parent_size.y - y)
wx.PyControl.DoSetSize(self, x, y, width, height, sizeFlags)
def OnIdle(self, event):
"""
Handles the ``wx.EVT_IDLE`` event for L{AuiToolBar}.
:param `event`: a `wx.IdleEvent` event to be processed.
"""
self.DoIdleUpdate()
event.Skip()
def DoGetBestSize(self):
"""
Gets the size which best suits the window: for a control, it would be the
minimal size which doesn't truncate the control, for a panel - the same
size as it would have after a call to `Fit()`.
:note: Overridden from `wx.PyControl`.
"""
return self._absolute_min_size
def OnPaint(self, event):
"""
Handles the ``wx.EVT_PAINT`` event for L{AuiToolBar}.
:param `event`: a `wx.PaintEvent` event to be processed.
"""
dc = wx.AutoBufferedPaintDC(self)
cli_rect = wx.RectPS(wx.Point(0, 0), self.GetClientSize())
horizontal = True
if self._agwStyle & AUI_TB_VERTICAL:
horizontal = False
if self._agwStyle & AUI_TB_PLAIN_BACKGROUND:
self._art.DrawPlainBackground(dc, self, cli_rect)
else:
self._art.DrawBackground(dc, self, cli_rect, horizontal)
gripper_size = self._art.GetElementSize(AUI_TBART_GRIPPER_SIZE)
dropdown_size = self._art.GetElementSize(AUI_TBART_OVERFLOW_SIZE)
# paint the gripper
if gripper_size > 0 and self._gripper_sizer_item:
gripper_rect = wx.Rect(*self._gripper_sizer_item.GetRect())
if horizontal:
gripper_rect.width = gripper_size
else:
gripper_rect.height = gripper_size
self._art.DrawGripper(dc, self, gripper_rect)
# calculate how far we can draw items
if horizontal:
last_extent = cli_rect.width
else:
last_extent = cli_rect.height
if self._overflow_visible:
last_extent -= dropdown_size
# paint each individual tool
for item in self._items:
if not item.sizer_item:
continue
item_rect = wx.Rect(*item.sizer_item.GetRect())
if (horizontal and item_rect.x + item_rect.width >= last_extent) or \
(not horizontal and item_rect.y + item_rect.height >= last_extent):
break
if item.kind == ITEM_SEPARATOR:
# draw a separator
self._art.DrawSeparator(dc, self, item_rect)
elif item.kind == ITEM_LABEL:
# draw a text label only
self._art.DrawLabel(dc, self, item, item_rect)
elif item.kind == ITEM_NORMAL:
# draw a regular button or dropdown button
if not item.dropdown:
self._art.DrawButton(dc, self, item, item_rect)
else:
self._art.DrawDropDownButton(dc, self, item, item_rect)
elif item.kind == ITEM_CHECK:
# draw a regular toggle button or a dropdown one
if not item.dropdown:
self._art.DrawButton(dc, self, item, item_rect)
else:
self._art.DrawDropDownButton(dc, self, item, item_rect)
elif item.kind == ITEM_RADIO:
# draw a toggle button
self._art.DrawButton(dc, self, item, item_rect)
elif item.kind == ITEM_CONTROL:
# draw the control's label
self._art.DrawControlLabel(dc, self, item, item_rect)
# fire a signal to see if the item wants to be custom-rendered
self.OnCustomRender(dc, item, item_rect)
# paint the overflow button
if dropdown_size > 0 and self._overflow_sizer_item:
dropdown_rect = self.GetOverflowRect()
self._art.DrawOverflowButton(dc, self, dropdown_rect, self._overflow_state)
def OnEraseBackground(self, event):
"""
Handles the ``wx.EVT_ERASE_BACKGROUND`` event for L{AuiToolBar}.
:param `event`: a `wx.EraseEvent` event to be processed.
:note: This is intentionally empty, to reduce flicker.
"""
pass
def OnLeftDown(self, event):
"""
Handles the ``wx.EVT_LEFT_DOWN`` event for L{AuiToolBar}.
:param `event`: a `wx.MouseEvent` event to be processed.
"""
cli_rect = wx.RectPS(wx.Point(0, 0), self.GetClientSize())
self.StopPreviewTimer()
if self._gripper_sizer_item:
gripper_rect = wx.Rect(*self._gripper_sizer_item.GetRect())
if gripper_rect.Contains(event.GetPosition()):
# find aui manager
manager = self.GetAuiManager()
if not manager:
return
x_drag_offset = event.GetX() - gripper_rect.GetX()
y_drag_offset = event.GetY() - gripper_rect.GetY()
clientPt = wx.Point(*event.GetPosition())
screenPt = self.ClientToScreen(clientPt)
managedWindow = manager.GetManagedWindow()
managerClientPt = managedWindow.ScreenToClient(screenPt)
# gripper was clicked
manager.OnGripperClicked(self, managerClientPt, wx.Point(x_drag_offset, y_drag_offset))
return
if self._overflow_sizer_item:
overflow_rect = self.GetOverflowRect()
if self._art and self._overflow_visible and overflow_rect.Contains(event.GetPosition()):
e = AuiToolBarEvent(wxEVT_COMMAND_AUITOOLBAR_OVERFLOW_CLICK, -1)
e.SetEventObject(self)
e.SetToolId(-1)
e.SetClickPoint(event.GetPosition())
processed = self.ProcessEvent(e)
if processed:
self.DoIdleUpdate()
else:
overflow_items = []
# add custom overflow prepend items, if any
count = len(self._custom_overflow_prepend)
for i in xrange(count):
overflow_items.append(self._custom_overflow_prepend[i])
# the dropdown only shows items that don't fit on the toolbar
count = len(self._items)
for i in xrange(count):
if not self.GetToolFitsByIndex(i):
overflow_items.append(self._items[i])
# add custom overflow append items, if any
count = len(self._custom_overflow_append)
for i in xrange(count):
overflow_items.append(self._custom_overflow_append[i])
res = self._art.ShowDropDown(self, overflow_items)
self._overflow_state = 0
self.Refresh(False)
if res != -1:
e = wx.CommandEvent(wx.wxEVT_COMMAND_MENU_SELECTED, res)
e.SetEventObject(self)
if not self.GetParent().ProcessEvent(e):
tool = self.FindTool(res)
if tool:
state = (tool.state & AUI_BUTTON_STATE_CHECKED and [True] or [False])[0]
self.ToggleTool(res, not state)
return
self._dragging = False
self._action_pos = wx.Point(*event.GetPosition())
self._action_item = self.FindToolForPosition(*event.GetPosition())
if self._action_item:
if self._action_item.state & AUI_BUTTON_STATE_DISABLED:
self._action_pos = wx.Point(-1, -1)
self._action_item = None
return
self.SetPressedItem(self._action_item)
# fire the tool dropdown event
e = AuiToolBarEvent(wxEVT_COMMAND_AUITOOLBAR_TOOL_DROPDOWN, self._action_item.id)
e.SetEventObject(self)
e.SetToolId(self._action_item.id)
e.SetDropDownClicked(False)
mouse_x, mouse_y = event.GetX(), event.GetY()
rect = wx.Rect(*self._action_item.sizer_item.GetRect())
if self._action_item.dropdown:
if (self._action_item.orientation == AUI_TBTOOL_HORIZONTAL and \
mouse_x >= (rect.x+rect.width-BUTTON_DROPDOWN_WIDTH-1) and \
mouse_x < (rect.x+rect.width)) or \
(self._action_item.orientation != AUI_TBTOOL_HORIZONTAL and \
mouse_y >= (rect.y+rect.height-BUTTON_DROPDOWN_WIDTH-1) and \
mouse_y < (rect.y+rect.height)):
e.SetDropDownClicked(True)
e.SetClickPoint(event.GetPosition())
e.SetItemRect(rect)
self.ProcessEvent(e)
self.DoIdleUpdate()
def OnLeftUp(self, event):
"""
Handles the ``wx.EVT_LEFT_UP`` event for L{AuiToolBar}.
:param `event`: a `wx.MouseEvent` event to be processed.
"""
self.SetPressedItem(None)
hit_item = self.FindToolForPosition(*event.GetPosition())
if hit_item and not hit_item.state & AUI_BUTTON_STATE_DISABLED:
self.SetHoverItem(hit_item)
if self._dragging:
# reset drag and drop member variables
self._dragging = False
self._action_pos = wx.Point(-1, -1)
self._action_item = None
else:
if self._action_item and hit_item == self._action_item:
self.SetToolTipString("")
if hit_item.kind in [ITEM_CHECK, ITEM_RADIO]:
toggle = not (self._action_item.state & AUI_BUTTON_STATE_CHECKED)
self.ToggleTool(self._action_item.id, toggle)
# repaint immediately
self.Refresh(False)
self.Update()
e = wx.CommandEvent(wx.wxEVT_COMMAND_MENU_SELECTED, self._action_item.id)
e.SetEventObject(self)
e.SetInt(toggle)
self._action_pos = wx.Point(-1, -1)
self._action_item = None
self.ProcessEvent(e)
self.DoIdleUpdate()
else:
if self._action_item.id == ID_RESTORE_FRAME:
# find aui manager
manager = self.GetAuiManager()
if not manager:
return
pane = manager.GetPane(self)
e = framemanager.AuiManagerEvent(framemanager.wxEVT_AUI_PANE_MIN_RESTORE)
e.SetManager(manager)
e.SetPane(pane)
manager.ProcessEvent(e)
self.DoIdleUpdate()
else:
e = wx.CommandEvent(wx.wxEVT_COMMAND_MENU_SELECTED, self._action_item.id)
e.SetEventObject(self)
self.ProcessEvent(e)
self.DoIdleUpdate()
# reset drag and drop member variables
self._dragging = False
self._action_pos = wx.Point(-1, -1)
self._action_item = None
def OnRightDown(self, event):
"""
Handles the ``wx.EVT_RIGHT_DOWN`` event for L{AuiToolBar}.
:param `event`: a `wx.MouseEvent` event to be processed.
"""
cli_rect = wx.RectPS(wx.Point(0, 0), self.GetClientSize())
if self._gripper_sizer_item:
gripper_rect = self._gripper_sizer_item.GetRect()
if gripper_rect.Contains(event.GetPosition()):
return
if self._overflow_sizer_item:
dropdown_size = self._art.GetElementSize(AUI_TBART_OVERFLOW_SIZE)
if dropdown_size > 0 and event.m_x > cli_rect.width - dropdown_size and \
event.m_y >= 0 and event.m_y < cli_rect.height and self._art:
return
self._action_pos = wx.Point(*event.GetPosition())
self._action_item = self.FindToolForPosition(*event.GetPosition())
if self._action_item:
if self._action_item.state & AUI_BUTTON_STATE_DISABLED:
self._action_pos = wx.Point(-1, -1)
self._action_item = None
return
def OnRightUp(self, event):
"""
Handles the ``wx.EVT_RIGHT_UP`` event for L{AuiToolBar}.
:param `event`: a `wx.MouseEvent` event to be processed.
"""
hit_item = self.FindToolForPosition(*event.GetPosition())
if self._action_item and hit_item == self._action_item:
e = AuiToolBarEvent(wxEVT_COMMAND_AUITOOLBAR_RIGHT_CLICK, self._action_item.id)
e.SetEventObject(self)
e.SetToolId(self._action_item.id)
e.SetClickPoint(self._action_pos)
self.ProcessEvent(e)
self.DoIdleUpdate()
else:
# right-clicked on the invalid area of the toolbar
e = AuiToolBarEvent(wxEVT_COMMAND_AUITOOLBAR_RIGHT_CLICK, -1)
e.SetEventObject(self)
e.SetToolId(-1)
e.SetClickPoint(self._action_pos)
self.ProcessEvent(e)
self.DoIdleUpdate()
# reset member variables
self._action_pos = wx.Point(-1, -1)
self._action_item = None
def OnMiddleDown(self, event):
"""
Handles the ``wx.EVT_MIDDLE_DOWN`` event for L{AuiToolBar}.
:param `event`: a `wx.MouseEvent` event to be processed.
"""
cli_rect = wx.RectPS(wx.Point(0, 0), self.GetClientSize())
if self._gripper_sizer_item:
gripper_rect = self._gripper_sizer_item.GetRect()
if gripper_rect.Contains(event.GetPosition()):
return
if self._overflow_sizer_item:
dropdown_size = self._art.GetElementSize(AUI_TBART_OVERFLOW_SIZE)
if dropdown_size > 0 and event.m_x > cli_rect.width - dropdown_size and \
event.m_y >= 0 and event.m_y < cli_rect.height and self._art:
return
self._action_pos = wx.Point(*event.GetPosition())
self._action_item = self.FindToolForPosition(*event.GetPosition())
if self._action_item:
if self._action_item.state & AUI_BUTTON_STATE_DISABLED:
self._action_pos = wx.Point(-1, -1)
self._action_item = None
return
def OnMiddleUp(self, event):
"""
Handles the ``wx.EVT_MIDDLE_UP`` event for L{AuiToolBar}.
:param `event`: a `wx.MouseEvent` event to be processed.
"""
hit_item = self.FindToolForPosition(*event.GetPosition())
if self._action_item and hit_item == self._action_item:
if hit_item.kind == ITEM_NORMAL:
e = AuiToolBarEvent(wxEVT_COMMAND_AUITOOLBAR_MIDDLE_CLICK, self._action_item.id)
e.SetEventObject(self)
e.SetToolId(self._action_item.id)
e.SetClickPoint(self._action_pos)
self.ProcessEvent(e)
self.DoIdleUpdate()
# reset member variables
self._action_pos = wx.Point(-1, -1)
self._action_item = None
def OnMotion(self, event):
"""
Handles the ``wx.EVT_MOTION`` event for L{AuiToolBar}.
:param `event`: a `wx.MouseEvent` event to be processed.
"""
# start a drag event
if not self._dragging and self._action_item != None and self._action_pos != wx.Point(-1, -1) and \
abs(event.m_x - self._action_pos.x) + abs(event.m_y - self._action_pos.y) > 5:
self.SetToolTipString("")
self._dragging = True
e = AuiToolBarEvent(wxEVT_COMMAND_AUITOOLBAR_BEGIN_DRAG, self.GetId())
e.SetEventObject(self)
e.SetToolId(self._action_item.id)
self.ProcessEvent(e)
self.DoIdleUpdate()
return
hit_item = self.FindToolForPosition(*event.GetPosition())
if hit_item:
if not hit_item.state & AUI_BUTTON_STATE_DISABLED:
self.SetHoverItem(hit_item)
else:
self.SetHoverItem(None)
else:
# no hit item, remove any hit item
self.SetHoverItem(hit_item)
# figure out tooltips
packing_hit_item = self.FindToolForPositionWithPacking(*event.GetPosition())
if packing_hit_item:
if packing_hit_item != self._tip_item:
self._tip_item = packing_hit_item
if packing_hit_item.short_help != "":
self.StartPreviewTimer()
self.SetToolTipString(packing_hit_item.short_help)
else:
self.SetToolTipString("")
self.StopPreviewTimer()
else:
self.SetToolTipString("")
self._tip_item = None
self.StopPreviewTimer()
# if we've pressed down an item and we're hovering
# over it, make sure its state is set to pressed
if self._action_item:
if self._action_item == hit_item:
self.SetPressedItem(self._action_item)
else:
self.SetPressedItem(None)
# figure out the dropdown button state (are we hovering or pressing it?)
self.RefreshOverflowState()
def OnLeaveWindow(self, event):
"""
Handles the ``wx.EVT_LEAVE_WINDOW`` event for L{AuiToolBar}.
:param `event`: a `wx.MouseEvent` event to be processed.
"""
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Xibo - Digital Signage - http://www.xibo.org.uk
# Copyright (C) 2009-2014 Alex Harrington
#
# This file is part of Xibo.
#
# Xibo is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# Xibo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Xibo. If not, see <http://www.gnu.org/licenses/>.
#
from libavg import *
from SOAPpy import WSDL
import SOAPpy.Types
import SOAPpy.Errors
import xml.parsers.expat
from xml.dom import minidom
import time
import uuid
import hashlib
import Queue
import ConfigParser
import gettext
import os
import fnmatch
import re
import datetime
import sys
import socket
import inspect
from collections import defaultdict
from threading import Thread, Semaphore
import threading
import urlparse
import PIL.Image
import math
import platform
import shutil
from ThirdParty.period.period import in_period
version = "1.6.0-rc2"
# What layout schema version is supported
schemaVersion = 1
#### Abstract Classes
class XiboLog:
"Abstract Class - Interface for Loggers"
level = 0
def __init__(self, level): abstract
def log(self, level, category, message, osd = False): abstract
def stat(self, statType, fromDT, toDT, tag, layoutID, scheduleID, mediaID): abstract
def setXmds(self, xmds):
pass
def flush(self):
pass
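# The bare name `abstract` used in __init__/log/stat above acts as a lightweight
# abstract-method marker: evaluating it raises NameError at call time unless a
# module-level `abstract` helper happens to be defined elsewhere in this file,
# so calling an interface method that a concrete logger has not overridden fails
# at runtime rather than silently doing nothing.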
def setupInfo(self, p):
self.p = p
try:
self.liftEnabled = config.get('Lift', 'enabled')
if self.liftEnabled == "false":
self.liftEnabled = False
log.log(3, "audit", _("Disabling lift functionality in Logger"))
else:
self.liftEnabled = True
log.log(3, "audit", _("Enabling lift functionality in Logger"))
except:
self.liftEnabled = False
log.log(3, "error", _("Lift->enabled not defined in configuration. Disabling lift functionality in Logger"))
# Populate the info screen
# Background.
tmpXML = '<rect fillcolor="ffffff" id="infoBG" fillopacity="0.75" size="(400,300)" />'
self.p.enqueue('add', (tmpXML, 'info'))
# Logo + version bottom right
tmpXML = '<image href="resources/logo.png" id="infoLOGO" opacity="1" width="50" height="18" x="345" y="276" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<words x="290" y="280" opacity="1" text="v' + version + '" font="Arial" color="000000" fontsize="12" />'
self.p.enqueue('add', (tmpXML, 'info'))
# Required Files Traffic Light
tmpXML = '<image href="resources/dotgrey.png" id="infoRFGrey" opacity="1" width="20" height="20" x="5" y="275" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotred.png" id="infoRFRed" opacity="0" width="20" height="20" x="5" y="275" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotamber.png" id="infoRFAmber" opacity="0" width="20" height="20" x="5" y="275" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgreen.png" id="infoRFGreen" opacity="0" width="20" height="20" x="5" y="275" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<words x="10" y="270" opacity="1" text="Required Files" font="Arial" color="000000" fontsize="10" angle="-1.57079633" pivot="(0,0)"/>'
self.p.enqueue('add' ,(tmpXML, 'info'))
# GetFile Traffic Light
tmpXML = '<image href="resources/dotgrey.png" id="infoGFGrey" opacity="1" width="20" height="20" x="30" y="275" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotred.png" id="infoGFRed" opacity="0" width="20" height="20" x="30" y="275" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotamber.png" id="infoGFAmber" opacity="0" width="20" height="20" x="30" y="275" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgreen.png" id="infoGFGreen" opacity="0" width="20" height="20" x="30" y="275" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<words x="35" y="270" opacity="1" text="Get File" font="Arial" color="000000" fontsize="10" angle="-1.57079633" pivot="(0,0)"/>'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<words id="infoRunningDownloads" x="37" y="278" opacity="1" text="0" font="Arial" color="000000" fontsize="10" />'
self.p.enqueue('add', (tmpXML, 'info'))
# Schedule Traffic Light
tmpXML = '<image href="resources/dotgrey.png" id="infoSGrey" opacity="1" width="20" height="20" x="55" y="275" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotred.png" id="infoSRed" opacity="0" width="20" height="20" x="55" y="275" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotamber.png" id="infoSAmber" opacity="0" width="20" height="20" x="55" y="275" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgreen.png" id="infoSGreen" opacity="0" width="20" height="20" x="55" y="275" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<words x="60" y="270" opacity="1" text="Schedule" font="Arial" color="000000" fontsize="10" angle="-1.57079633" pivot="(0,0)"/>'
self.p.enqueue('add', (tmpXML, 'info'))
# RegisterDisplay Traffic Light
tmpXML = '<image href="resources/dotgrey.png" id="infoRDGrey" opacity="1" width="20" height="20" x="80" y="275" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotred.png" id="infoRDRed" opacity="0" width="20" height="20" x="80" y="275" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotamber.png" id="infoRDAmber" opacity="0" width="20" height="20" x="80" y="275" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgreen.png" id="infoRDGreen" opacity="0" width="20" height="20" x="80" y="275" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<words x="85" y="270" opacity="1" text="Register Display" font="Arial" color="000000" fontsize="10" angle="-1.57079633" pivot="(0,0)"/>'
self.p.enqueue('add', (tmpXML, 'info'))
# Logs Traffic Light
tmpXML = '<image href="resources/dotgrey.png" id="infoLogGrey" opacity="1" width="20" height="20" x="105" y="275" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotred.png" id="infoLogRed" opacity="0" width="20" height="20" x="105" y="275" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotamber.png" id="infoLogAmber" opacity="0" width="20" height="20" x="105" y="275" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgreen.png" id="infoLogGreen" opacity="0" width="20" height="20" x="105" y="275" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<words x="110" y="270" opacity="1" text="Log" font="Arial" color="000000" fontsize="10" angle="-1.57079633" pivot="(0,0)"/>'
self.p.enqueue('add', (tmpXML, 'info'))
# Stats Traffic Light
tmpXML = '<image href="resources/dotgrey.png" id="infoStatGrey" opacity="1" width="20" height="20" x="130" y="275" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotred.png" id="infoStatRed" opacity="0" width="20" height="20" x="130" y="275" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotamber.png" id="infoStatAmber" opacity="0" width="20" height="20" x="130" y="275" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgreen.png" id="infoStatGreen" opacity="0" width="20" height="20" x="130" y="275" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<words x="135" y="270" opacity="1" text="Stats" font="Arial" color="000000" fontsize="10" angle="-1.57079633" pivot="(0,0)"/>'
self.p.enqueue('add', (tmpXML, 'info'))
# Offline Update traffic light
tmpXML = '<image href="resources/dotamber.png" id="offlineUpdateAmber" opacity="0" width="20" height="20" x="20" y="20" />'
self.p.enqueue('add', (tmpXML, 'offlineUpdate'))
tmpXML = '<image href="resources/dotgreen.png" id="offlineUpdateGreen" opacity="0" width="20" height="20" x="20" y="20" />'
self.p.enqueue('add', (tmpXML, 'offlineUpdate'))
# IP Address
tmpXML = '<words x="5" y="5" opacity="1" text="IP Address: " font="Arial" color="000000" fontsize="11" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<words id="infoIP" x="75" y="5" opacity="1" text="" font="Arial" color="000000" fontsize="11" width="180" linespacing="10" alignment="left" />'
self.p.enqueue('add', (tmpXML, 'info'))
# Disk Space
tmpXML = '<words x="5" y="18" opacity="1" text="Disk Space: " font="Arial" color="000000" fontsize="11" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<words id="infoDisk" x="75" y="18" opacity="1" text="" font="Arial" color="000000" fontsize="11" width="180" linespacing="10" alignment="left" />'
self.p.enqueue('add', (tmpXML, 'info'))
# Lift Traffic Lights
if self.liftEnabled:
tmpXML = '<image href="resources/dotgrey.png" id="infoLift1Grey" opacity="1" width="5" height="5" x="165" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotred.png" id="infoLift1Red" opacity="0" width="5" height="5" x="165" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotamber.png" id="infoLift1Amber" opacity="0" width="5" height="5" x="165" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgreen.png" id="infoLift1Green" opacity="0" width="5" height="5" x="165" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgrey.png" id="infoLift2Grey" opacity="1" width="5" height="5" x="170" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotred.png" id="infoLift2Red" opacity="0" width="5" height="5" x="170" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotamber.png" id="infoLift2Amber" opacity="0" width="5" height="5" x="170" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgreen.png" id="infoLift2Green" opacity="0" width="5" height="5" x="170" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgrey.png" id="infoLift3Grey" opacity="1" width="5" height="5" x="175" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotred.png" id="infoLift3Red" opacity="0" width="5" height="5" x="175" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotamber.png" id="infoLift3Amber" opacity="0" width="5" height="5" x="175" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgreen.png" id="infoLift3Green" opacity="0" width="5" height="5" x="175" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgrey.png" id="infoLift4Grey" opacity="1" width="5" height="5" x="180" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotred.png" id="infoLift4Red" opacity="0" width="5" height="5" x="180" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotamber.png" id="infoLift4Amber" opacity="0" width="5" height="5" x="180" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgreen.png" id="infoLift4Green" opacity="0" width="5" height="5" x="180" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgrey.png" id="infoLift5Grey" opacity="1" width="5" height="5" x="190" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotred.png" id="infoLift5Red" opacity="0" width="5" height="5" x="190" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotamber.png" id="infoLift5Amber" opacity="0" width="5" height="5" x="190" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgreen.png" id="infoLift5Green" opacity="0" width="5" height="5" x="190" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgrey.png" id="infoLift6Grey" opacity="1" width="5" height="5" x="195" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotred.png" id="infoLift6Red" opacity="0" width="5" height="5" x="195" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotamber.png" id="infoLift6Amber" opacity="0" width="5" height="5" x="195" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgreen.png" id="infoLift6Green" opacity="0" width="5" height="5" x="195" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgrey.png" id="infoLift7Grey" opacity="1" width="5" height="5" x="200" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotred.png" id="infoLift7Red" opacity="0" width="5" height="5" x="200" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotamber.png" id="infoLift7Amber" opacity="0" width="5" height="5" x="200" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgreen.png" id="infoLift7Green" opacity="0" width="5" height="5" x="200" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgrey.png" id="infoLift8Grey" opacity="1" width="5" height="5" x="205" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotred.png" id="infoLift8Red" opacity="0" width="5" height="5" x="205" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotamber.png" id="infoLift8Amber" opacity="0" width="5" height="5" x="205" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgreen.png" id="infoLift8Green" opacity="0" width="5" height="5" x="205" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgrey.png" id="infoLift9Grey" opacity="1" width="5" height="5" x="215" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotred.png" id="infoLift9Red" opacity="0" width="5" height="5" x="215" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotamber.png" id="infoLift9Amber" opacity="0" width="5" height="5" x="215" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgreen.png" id="infoLift9Green" opacity="0" width="5" height="5" x="215" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgrey.png" id="infoLift10Grey" opacity="1" width="5" height="5" x="220" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotred.png" id="infoLift10Red" opacity="0" width="5" height="5" x="220" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotamber.png" id="infoLift10Amber" opacity="0" width="5" height="5" x="220" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgreen.png" id="infoLift10Green" opacity="0" width="5" height="5" x="220" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgrey.png" id="infoLift11Grey" opacity="1" width="5" height="5" x="225" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotred.png" id="infoLift11Red" opacity="0" width="5" height="5" x="225" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotamber.png" id="infoLift11Amber" opacity="0" width="5" height="5" x="225" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgreen.png" id="infoLift11Green" opacity="0" width="5" height="5" x="225" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgrey.png" id="infoLift12Grey" opacity="1" width="5" height="5" x="230" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotred.png" id="infoLift12Red" opacity="0" width="5" height="5" x="230" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotamber.png" id="infoLift12Amber" opacity="0" width="5" height="5" x="230" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgreen.png" id="infoLift12Green" opacity="0" width="5" height="5" x="230" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgrey.png" id="infoLift13Grey" opacity="1" width="5" height="5" x="240" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotred.png" id="infoLift13Red" opacity="0" width="5" height="5" x="240" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotamber.png" id="infoLift13Amber" opacity="0" width="5" height="5" x="240" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgreen.png" id="infoLift13Green" opacity="0" width="5" height="5" x="240" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgrey.png" id="infoLift14Grey" opacity="1" width="5" height="5" x="245" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotred.png" id="infoLift14Red" opacity="0" width="5" height="5" x="245" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotamber.png" id="infoLift14Amber" opacity="0" width="5" height="5" x="245" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgreen.png" id="infoLift14Green" opacity="0" width="5" height="5" x="245" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgrey.png" id="infoLift15Grey" opacity="1" width="5" height="5" x="250" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotred.png" id="infoLift15Red" opacity="0" width="5" height="5" x="250" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotamber.png" id="infoLift15Amber" opacity="0" width="5" height="5" x="250" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgreen.png" id="infoLift15Green" opacity="0" width="5" height="5" x="250" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgrey.png" id="infoLift16Grey" opacity="1" width="5" height="5" x="255" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotred.png" id="infoLift16Red" opacity="0" width="5" height="5" x="255" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotamber.png" id="infoLift16Amber" opacity="0" width="5" height="5" x="255" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<image href="resources/dotgreen.png" id="infoLift16Green" opacity="0" width="5" height="5" x="255" y="285" />'
self.p.enqueue('add', (tmpXML, 'info'))
# Lift Tag
tmpXML = '<words id="infoLiftTag" x="165" y="265" opacity="1" text="Current Tag: default" font="Arial" color="000000" fontsize="11" />'
self.p.enqueue('add', (tmpXML, 'info'))
# Schedule
tmpXML = '<words x="5" y="75" opacity="1" text="Schedule" font="Arial" color="000000" fontsize="14" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<words id="infoCurrentSchedule" x="5" y="90" opacity="1" text="" font="Arial" color="000000" fontsize="11" width="180" linespacing="10" alignment="left" />'
self.p.enqueue('add', (tmpXML, 'info'))
# Now Playing
tmpXML = '<words x="5" y="40" opacity="1" text="Now Playing" font="Arial" color="000000" fontsize="14" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<words id="infoNowPlaying" x="5" y="55" opacity="1" text="" font="Arial" color="000000" fontsize="11" />'
self.p.enqueue('add', (tmpXML, 'info'))
# Media
tmpXML = '<words x="205" y="5" opacity="1" text="Media" font="Arial" color="000000" fontsize="14" />'
self.p.enqueue('add', (tmpXML, 'info'))
tmpXML = '<words id="infoMedia" x="205" y="20" opacity="1" text="" font="Arial" color="000000" fontsize="11" />'
self.p.enqueue('add', (tmpXML, 'info'))
# On Screen Logging
tmpXML = '<rect fillcolor="ffffff" id="osLogBG" fillopacity="0.75" size="(%d,%d)" />' % (self.p.osLogX, 20)
self.p.enqueue('add', (tmpXML, 'osLog'))
tmpXML = '<words id="osLogText" x="5" y="3" opacity="1" text="Xibo Client v%s" font="Arial" color="000000" fontsize="11" />' % version
self.p.enqueue('add', (tmpXML, 'osLog'))
def lights(self, field, value):
if value == "green":
self.p.enqueue('setOpacity', ("info" + field + "Green", 1))
self.p.enqueue('setOpacity', ("info" + field + "Grey", 0))
self.p.enqueue('setOpacity', ("info" + field + "Amber", 0))
self.p.enqueue('setOpacity', ("info" + field + "Red", 0))
if value == "red":
self.p.enqueue('setOpacity', ("info" + field + "Green", 0))
self.p.enqueue('setOpacity', ("info" + field + "Grey", 0))
self.p.enqueue('setOpacity', ("info" + field + "Amber", 0))
self.p.enqueue('setOpacity', ("info" + field + "Red", 1))
if value == "amber":
self.p.enqueue('setOpacity', ("info" + field + "Green", 0))
self.p.enqueue('setOpacity', ("info" + field + "Grey", 0))
self.p.enqueue('setOpacity', ("info" + field + "Amber", 1))
self.p.enqueue('setOpacity', ("info" + field + "Red", 0))
if value == "grey":
self.p.enqueue('setOpacity', ("info" + field + "Green", 0))
self.p.enqueue('setOpacity', ("info" + field + "Grey", 1))
self.p.enqueue('setOpacity', ("info" + field + "Amber", 0))
self.p.enqueue('setOpacity', ("info" + field + "Red", 0))
if value == "start":
self.p.enqueue('setOpacity', ("%sAmber" % field, 1))
self.p.enqueue('setOpacity', ("%sGreen" % field, 0))
if value == "finish":
self.p.enqueue('setOpacity', ("%sAmber" % field, 0))
self.p.enqueue('setOpacity', ("%sGreen" % field, 1))
self.p.enqueue('anim', ('fadeOut', '%sGreen' % field, 3000, None))
def updateSchedule(self, schedule):
self.p.enqueue('del', 'infoCurrentSchedule')
tmpXML = '<words id="infoCurrentSchedule" x="5" y="90" opacity="1" text="' + schedule + '" font="Arial" color="000000" fontsize="11" width="180" linespacing="10" alignment="left" />'
self.p.enqueue('add', (tmpXML, 'info'))
def updateNowPlaying(self, now):
self.p.enqueue('del', 'infoNowPlaying')
tmpXML = '<words id="infoNowPlaying" x="5" y="55" opacity="1" text="' + now + '" font="Arial" color="000000" fontsize="11" />'
self.p.enqueue('add', (tmpXML, 'info'))
def updateMedia(self, media):
self.p.enqueue('del', 'infoMedia')
tmpXML = '<words id="infoMedia" x="205" y="20" opacity="1" font="Arial" color="000000" fontsize="11" width="180">' + media + '</words>'
self.p.enqueue('add', (tmpXML, 'info'))
def updateRunningDownloads(self, num):
self.p.enqueue('del', 'infoRunningDownloads')
tmpXML = '<words id="infoRunningDownloads" x="37" y="278" opacity="1" text="' + str(num) + '" font="Arial" color="000000" fontsize="10" />'
self.p.enqueue('add', (tmpXML, 'info'))
def updateIP(self, serverIP):
self.p.enqueue('del', 'infoIP')
tmpXML = '<words id="infoIP" x="75" y="5" opacity="1" text="' + str(serverIP) + '" font="Arial" color="000000" fontsize="10" />'
self.p.enqueue('add', (tmpXML, 'info'))
def updateFreeSpace(self, tup):
perc = int((tup[1] * 1.0 / tup[0]) * 100)
self.p.enqueue('del', 'infoDisk')
tmpXML = '<words id="infoDisk" x="75" y="18" opacity="1" text="' + self.bytestr(tup[1]) + ' (' + str(perc) + '%) free" font="Arial" color="000000" fontsize="10" />'
self.p.enqueue('add', (tmpXML, 'info'))
# Convert a value in bytes to human readable format
# Taken from http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
# By Sridhar Ratnakumar
# Assumed Public Domain
def bytestr(self, size):
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if size < 1024.0:
return "%3.1f %s" % (size, x)
size /= 1024.0
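# Note: bytestr() falls through and returns None for sizes of 1024 TB or more.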
def updateLift(self, tag):
# Break out if lift is disabled
if not self.liftEnabled:
return
self.p.enqueue('del', 'infoLiftTag')
tmpXML = '<words id="infoLiftTag" x="165" y="265" opacity="1" text="Current Tag: ' + tag + '" font="Arial" color="000000" fontsize="11" />'
self.p.enqueue('add', (tmpXML, 'info'))
def osLog(self, message):
self.p.enqueue('del', 'osLogText')
tmpXML = '<words id="osLogText" x="5" y="3" opacity="1" text="%s" font="Arial" color="000000" fontsize="11" />' % message
self.p.enqueue('add', (tmpXML, 'osLog'))
class XiboScheduler(Thread):
"Abstract Class - Interface for Schedulers"
def run(self): abstract
def nextLayout(self): abstract
def hasNext(self): abstract
#### Finish Abstract Classes
#### Log Classes
class XiboLogSplit(XiboLog):
"Xibo Log Splitter - so log output can go to two log objects"
# Currently non-functional
def __init__(self,level):
self.level = int(level)
logWriter1 = config.get('Logging','splitLogWriter1')
logWriter2 = config.get('Logging','splitLogWriter2')
self.log1 = eval(logWriter1)(self.level)
self.log2 = eval(logWriter2)(self.level)
self.log(2,"info",_("XiboLogSplit logger started at level ") + str(level))
def log(self, severity, category, message, osd=False):
self.log1.log(severity, category, message, osd)
self.log2.log(severity, category, message, osd)
def stat(self, statType, fromDT, toDT, tag, layoutID, scheduleID, mediaID=""):
self.log1.stat(statType, fromDT, toDT, tag, layoutID, scheduleID, mediaID)
self.log2.stat(statType, fromDT, toDT, tag, layoutID, scheduleID, mediaID)
def flush(self):
self.log1.flush()
self.log2.flush()
class XiboLogFile(XiboLog):
"Xibo Logger - to file"
def __init__(self,level):
try:
self.fh = open('run.log','w')
except:
print "Unable to open run.log for writing."
# Make sure level is sane
if level == "" or int(level) < 0:
level=0
self.level = int(level)
self.log(2,"info",_("XiboLogFile logger started at level ") + str(level))
def log(self, severity, category, message, osd=False):
if self.level >= severity:
# Define these defaults here in case an exception is thrown below.
callingMethod = "unknown"
callingClass = ""
callingLineNumber = 0
try:
currFrame = inspect.currentframe().f_back
inspArray = inspect.getframeinfo(currFrame)
callingMethod = inspArray[2]
callingLineNumber = inspArray[1]
# TODO: Figure out how to get the class name too
callingClass = ""
finally:
del currFrame
function = callingClass + "." + callingMethod
date = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime())
self.fh.write("LOG: " + str(date) + " (" + str(function) + ":" + str(callingLineNumber) + ") " + str(severity) + " " + category + " " + message + "\n")
self.fh.flush()
# If osLog is enabled, update the status
if osd and self.p.osLog:
self.osLog(message)
def stat(self,statType, fromDT, toDT, tag, layoutID, scheduleID, mediaID):
pass
class XiboLogScreen(XiboLog):
"Xibo Logger - to screen"
def __init__(self,level):
# Make sure level is sane
if level == "" or int(level) < 0:
level=0
self.level = int(level)
self.log(2,"info",_("XiboLogScreen logger started at level ") + str(level))
def log(self, severity, category, message, osd=False):
if self.level >= severity:
print "LOG: " + str(time.time()) + " " + str(severity) + " " + category + " " + message
# If osLog is enabled, update the status
if osd and self.p.osLog:
self.osLog(message)
def stat(self, statType, fromDT, toDT, tag, layoutID, scheduleID, mediaID=""):
print "STAT: " + statType + " " + tag + " " + str(layoutID) + " " + str(scheduleID) + " " + str(mediaID)
class XiboLogXmds(XiboLog):
def __init__(self,level):
# Make sure level is sane
if level == "" or int(level) < 0:
level=0
self.level = int(level)
self.logs = Queue.Queue(0)
self.stats = Queue.Queue(0)
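# Queue.Queue(0) creates unbounded queues, so log() and stat() never block the caller;
# the worker thread drains both queues in the background.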
# Find out if we're doing stats, and how big the queue should be...
try:
statsOn = config.get('Stats','collect')
if statsOn == 'true':
self.statsOn = True
else:
self.statsOn = False
except ConfigParser.NoOptionError:
self.statsOn = False
try:
self.statsQueueSize = int(config.get('Stats','queueSize'))
except ConfigParser.NoOptionError:
self.statsQueueSize = 99
self.worker = XiboLogXmdsWorker(self.logs,self.stats,self.statsQueueSize)
self.worker.start()
# Fast non-blocking log and stat functions
# Logs and Stats pushed in native format on to the queue.
# A worker thread will then format the messages in to XML
# ready for transmission to the server.
def log(self, severity, category, message, osd=False):
if self.level >= severity:
try:
currFrame = inspect.currentframe().f_back
inspArray = inspect.getframeinfo(currFrame)
callingMethod = inspArray[2]
callingLineNumber = inspArray[1]
# TODO: Figure out how to get the class name too
callingClass = ""
finally:
del currFrame
function = callingClass + "." + callingMethod
date = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime())
self.logs.put((date,severity,category,function,callingLineNumber,message),False)
# If osLog is enabled, update the status
if osd and self.p.osLog:
self.osLog(message)
def stat(self, statType, fromDT, toDT, tag, layoutID, scheduleID, mediaID):
if self.statsOn:
self.stats.put((statType,fromDT,toDT,tag,layoutID,scheduleID,mediaID),False)
return
def setXmds(self,xmds):
self.worker.xmds = xmds
def flush(self):
# TODO: Seems to cause the client to hang on quit?
if self.worker.xmds is not None:
self.worker.flush = True
self.worker.process()
class XiboLogXmdsWorker(Thread):
def __init__(self,logs,stats,statsQueueSize):
Thread.__init__(self)
self.xmds = None
self.logs = logs
self.stats = stats
self.running = True
self.statXml = minidom.Document()
self.statsE = self.statXml.createElement("stats")
self.statXml.appendChild(self.statsE)
self.statsQueueSize = statsQueueSize
self.logXml = minidom.Document()
self.logE = self.logXml.createElement("log")
self.logXml.appendChild(self.logE)
self.flush = False
self.processing = False
self.__lock = Semaphore()
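# The semaphore serialises process(): it may be entered from the run() loop or from flush(), but never both at once.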
def run(self):
# Wait for XMDS to be initialised and available to us
while self.xmds is None:
time.sleep(60)
while self.running:
if (self.processing):
pass
else:
self.process()
time.sleep(30)
def process(self):
self.__lock.acquire()
self.processing = True
# Deal with logs:
try:
# Prepare logs to XML and store in self.logXml
while True:
date, severity, category, function, lineNo, message = self.logs.get(False)
traceE = self.logXml.createElement("trace")
traceE.setAttribute("date",date)
traceE.setAttribute("category",category)
self.logE.appendChild(traceE)
messageE = self.logXml.createElement("message")
messageTxt = self.logXml.createTextNode(message)
messageE.appendChild(messageTxt)
scheduleE = self.logXml.createElement("scheduleid")
layoutE = self.logXml.createElement("layoutid")
mediaE = self.logXml.createElement("mediaid")
methodE = self.logXml.createElement("method")
methodTxt = self.logXml.createTextNode(function)
methodE.appendChild(methodTxt)
lineE = self.logXml.createElement("line")
lineTxt = self.logXml.createTextNode(str(lineNo))
lineE.appendChild(lineTxt)
traceE.appendChild(messageE)
traceE.appendChild(scheduleE)
traceE.appendChild(layoutE)
traceE.appendChild(mediaE)
traceE.appendChild(methodE)
traceE.appendChild(lineE)
except Queue.Empty:
# Exception thrown breaks the inner while loop
# Do nothing
pass
if len(self.logE.childNodes) > 0:
# Get each trace in turn and send it to XMDS
traceNodes = self.logXml.getElementsByTagName('trace')
nExceptions = 0
xml = '<log>'
nodes = []
nProcessed = 0
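# Ship traces to XMDS in batches of up to 10 per SubmitLog call. Successfully sent
# traces are removed from logE; after five failed submissions the loop gives up for this run.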
for trace in traceNodes:
# Ship the logXml off to XMDS
if len(nodes) < 10:
nProcessed += 1
nodes.append(trace)
xml += trace.toxml()
if len(nodes) >= 10 or nProcessed == len(traceNodes):
try:
self.xmds.SubmitLog(xml + "</log>")
xml = '<log>'
for n in nodes:
self.logE.removeChild(n)
nodes = []
except XMDSException:
nExceptions += 1
if nExceptions > 4:
break
except:
pass
if len(self.logXml.getElementsByTagName('trace')) > 0:
# Some logs didn't send
# Flush to disk
# Check the log folder exists:
try:
os.makedirs(config.get('Main','libraryDir') + os.sep + 'log')
except:
pass
try:
try:
f = open(config.get('Main','libraryDir') + os.sep + 'log' + os.sep + 'log' + str(time.time()) + '.ready','w')
f.write(self.logXml.toprettyxml())
self.logXml.unlink()
self.logXml = minidom.Document()
self.logE = self.logXml.createElement("log")
self.logXml.appendChild(self.logE)
finally:
f.close()
except:
pass
else:
# All the logs sent
# Read in a past log file and append to logE for processing on the next run
readOne = False
# If this is a flush being called, skip reading in a new file as it will be lost in memory.
if self.flush:
readOne = True
# Check the log folder exists:
try:
os.makedirs(config.get('Main','libraryDir') + os.sep + 'log')
except:
pass
for f in os.listdir(config.get('Main','libraryDir') + os.sep + 'log'):
if readOne == False:
if fnmatch.fnmatch(f, '*.ready'):
try:
self.logXml.unlink()
self.logXml = minidom.parse(config.get('Main','libraryDir') + os.sep + 'log' + os.sep + f)
for n in self.logXml.getElementsByTagName('log'):
self.logE = n
except:
# File must be invalid. Delete it
try:
os.remove(config.get('Main','libraryDir') + os.sep + 'log' + os.sep + f)
except:
readOne = False
continue
# Now the file is back in memory, delete it
try:
os.remove(config.get('Main','libraryDir') + os.sep + 'log' + os.sep + f)
readOne = True
except:
pass
# Deal with stats:
try:
# Prepare stats to XML and store in self.statXml
while True:
statType, fromDT, toDT, tag, layoutID, scheduleID, mediaID = self.stats.get(False)
statE = self.statXml.createElement("stat")
statE.setAttribute("type",statType)
statE.setAttribute("fromdt",fromDT)
statE.setAttribute("todt",toDT)
if statType == "event":
statE.setAttribute("tag",tag)
elif statType == "media":
statE.setAttribute("mediaid",mediaID)
statE.setAttribute("layoutid",layoutID)
elif statType == "layout":
statE.setAttribute("layoutid",layoutID)
statE.setAttribute("scheduleid",scheduleID)
self.statsE.appendChild(statE)
except Queue.Empty:
# Exception thrown breaks the inner while loop
# Do nothing
pass
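# Submit the collected stats once the queue reaches the configured size, or immediately when a flush has been requested.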
if len(self.statsE.childNodes) >= self.statsQueueSize or self.flush:
self.flush = False
try:
# Ship the statXml off to XMDS
self.xmds.SubmitStats(self.statXml.toxml())
# Reset statXml
self.statXml = minidom.Document()
self.statsE = self.statXml.createElement("stats")
self.statXml.appendChild(self.statsE)
try:
os.remove(config.get('Main','libraryDir') + os.sep + 'stats.xml')
except:
pass
except XMDSException:
# Flush to disk in case we crash before getting another chance
try:
try:
f = open(config.get('Main','libraryDir') + os.sep + 'stats.xml','w')
f.write(self.statXml.toprettyxml())
finally:
f.close()
except:
pass
self.processing = False
self.__lock.release()
#### Finish Log Classes
#### Download Manager
class XiboFile(object):
def __init__(self,fileName,targetHash,fileId,fileType,mtime=0):
self.__path = os.path.join(config.get('Main','libraryDir'),fileName)
self.__fileName = fileName
self.md5 = "NOT CALCULATED"
self.checkTime = 1
self.fileType = fileType
self.fileId = fileId
self.targetHash = targetHash
self.mtime = mtime
self.paranoid = config.getboolean('Main','checksumPreviousDownloads')
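# Paranoid mode re-checksums the file now; otherwise the stored mtime is trusted and
# the target hash is assumed correct while the file remains unchanged on disk.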
if self.paranoid:
self.update()
else:
self.paranoid = False
try:
if os.path.getmtime(self.__path) == self.mtime:
self.md5 = self.targetHash
else:
self.update()
except:
self.update()
def update(self):
# Generate MD5
m = hashlib.md5()
try:
# print "*** GENERATING MD5 for file %s" % self.__fileName
for line in open(self.__path,"rb"):
m.update(line)
except IOError:
# print "*** IOERROR"
return False
self.md5 = m.hexdigest()
self.mtime = os.path.getmtime(self.__path)
self.checkTime = time.time()
return True
def isExpired(self):
if self.paranoid:
return self.checkTime + 3600 < time.time()
else:
try:
tmpMtime = os.path.getmtime(self.__path)
except:
return False
return not self.mtime == tmpMtime
def isValid(self):
try:
tmpMtime = os.path.getmtime(self.__path)
except:
return False
return (self.md5 == self.targetHash) and (self.mtime == tmpMtime)
def toTuple(self):
return (self.__fileName,self.md5,self.targetHash,self.checkTime,self.mtime,self.fileId,self.fileType)
class XiboResourceFile(object):
def __init__(self,fileName,targetHash,fileId,fileType,mtime=0):
self.__path = os.path.join(config.get('Main','libraryDir'),fileName)
self.__fileName = fileName
self.md5 = "NOT CALCULATED"
self.checkTime = 1
self.fileType = fileType
self.fileId = fileId
self.paranoid = False
self.targetHash = targetHash
self.mtime = mtime
try:
if os.path.getmtime(self.__path) == self.mtime:
self.md5 = self.targetHash
else:
self.update()
except:
self.update()
def update(self):
try:
tmpMtime = os.path.getmtime(self.__path)
except:
return False
self.mtime = tmpMtime
self.checkTime = time.time()
return True
def isExpired(self):
try:
tmpMtime = os.path.getmtime(self.__path)
except:
return False
return not self.mtime == tmpMtime
def isValid(self):
try:
tmpMtime = os.path.getmtime(self.__path)
except:
return False
return True
def toTuple(self):
return (self.__fileName,self.targetHash,self.targetHash,self.checkTime,self.mtime,self.fileId,self.fileType)
class XiboDownloadManager(Thread):
def __init__(self,xmds,player,parent):
Thread.__init__(self)
log.log(3,"info",_("New XiboDownloadManager instance created."))
self.xmds = xmds
self.running = True
self.dlQueue = Queue.Queue(0)
self.p = player # XiboPlayer Instance
self.parent = parent # Parent XiboDisplayManager Instance
self.__lock = Semaphore()
self.__lock.acquire()
self.offline = config.getboolean('Main','manualUpdate')
self.nextLayoutOnComplete = False
self.chainScheduler = False
self.cleanup = config.getboolean('Main','cleanOldMediaFiles')
self.lastCleanup = 0
# Store a dictionary of XiboDownloadThread objects so we know
# which files are downloading and how many download slots
# there are free
self.runningDownloads = defaultdict(XiboDownloadThread)
# How many XiboDownloadThreads should run at once
self.maxDownloads = 5
# Populate md5Cache
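# cache.xml persists file hashes and mtimes between runs so unchanged files do not need to be re-checksummed at startup.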
if config.get('Main','checksumPreviousDownloads') == "false":
try:
tmpDoc = minidom.parse(os.path.join(config.get('Main','libraryDir'),'cache.xml'))
for f in tmpDoc.getElementsByTagName('file'):
tmpFileName = str(f.attributes['name'].value)
tmpHash = str(f.attributes['md5'].value)
tmpMtime = float(f.attributes['mtime'].value)
tmpId = int(f.attributes['id'].value)
tmpType = str(f.attributes['type'].value)
if tmpType == 'resource':
tmpFile = XiboResourceFile(tmpFileName,tmpHash,tmpId,tmpType,tmpMtime)
else:
tmpFile = XiboFile(tmpFileName,tmpHash,tmpId,tmpType,tmpMtime)
md5Cache[tmpFileName] = tmpFile
except IOError:
log.log(0,"warning",_("Could not open cache.xml. Starting with an empty cache"),True)
except:
log.log(0,"warning",_("md5Cache file is corrupted. Ignoring."),True)
def run(self):
log.log(2,"info",_("New XiboDownloadManager instance started."))
while (self.running):
self.interval = 300
# Flag to note if on this loop we downloaded new files
updatedContent = False
# Find out how long we should wait between updates.
try:
self.interval = int(config.get('Main','xmdsUpdateInterval'))
except:
# self.interval has been set to a sensible default in this case.
log.log(0,"warning",_("No XMDS Update Interval specified in your configuration"),True)
log.log(0,"warning",_("Please check your xmdsUpdateInterval configuration option"))
log.log(0,"warning",_("A default value has been used:") + " " + str(self.interval) + " " + _("seconds"))
# Go through the list comparing required files to files we already have.
# If a file differs, queue it for download
reqFiles = '<files></files>'
try:
reqFiles = self.xmds.RequiredFiles()
log.log(5,"info",_("XiboDownloadManager: XMDS RequiredFiles() returned ") + str(reqFiles))
f = open(config.get('Main','libraryDir') + os.sep + 'rf.xml','w')
f.write(reqFiles)
f.close()
except IOError:
log.log(0,"error",_("Error trying to cache RequiredFiles to disk"),True)
except XMDSException:
log.log(0,"warning",_("XMDS RequiredFiles threw an exception"))
try:
try:
f = open(config.get('Main','libraryDir') + os.sep + 'rf.xml')
reqFiles = f.read()
finally:
f.close()
except:
# Couldn't read or file doesn't exist. Either way, return a blank list.
pass
self.doc = None
# Pull apart the returned XML
try:
self.doc = minidom.parseString(reqFiles)
except:
log.log(0,"warning",_("XMDS RequiredFiles returned invalid XML"),True)
# Process each file node in the returned XML
if self.doc is not None:
fileNodes = self.doc.getElementsByTagName('file')
for f in fileNodes:
# Does the file exist? Is it the right size?
if str(f.attributes['type'].value) == 'media':
try:
tmpPath = os.path.join(config.get('Main','libraryDir'),str(f.attributes['path'].value))
tmpFileName = str(f.attributes['path'].value)
tmpSize = long(f.attributes['size'].value)
tmpHash = str(f.attributes['md5'].value)
tmpType = str(f.attributes['type'].value)
try:
tmpId = int(f.attributes['id'].value)
except:
# Layout background images don't come down with IDs
# Blame Dan :D
tmpId = 0
if os.path.isfile(tmpPath) and os.path.getsize(tmpPath) == tmpSize:
# File exists and is the right size
# See if we checksummed it recently
if tmpFileName in md5Cache:
# Check if the md5 cache is old for this file
if md5Cache[tmpFileName].isExpired():
# Update the cache if it is
# print "*** It's 726 updating %s" % tmpFileName
md5Cache[tmpFileName].update()
if not md5Cache[tmpFileName].isValid():
# The hashes don't match.
# Queue for download.
log.log(2,"warning",_("File exists and is the correct size, but the checksum is incorrect. Queueing for download. ") + tmpFileName,True)
self.dlQueue.put((tmpType,tmpFileName,tmpSize,tmpHash,tmpId),False)
else:
# print "*** It's 735 and %s isn't in md5Cache" % tmpFileName
tmpFile = XiboFile(tmpFileName,tmpHash,tmpId,tmpType)
md5Cache[tmpFileName] = tmpFile
if not tmpFile.isValid():
# The hashes don't match.
# Queue for download.
log.log(2,"warning",_("File exists and is the correct size, but the checksum is incorrect. Queueing for download. ") + tmpFileName,True)
self.dlQueue.put((tmpType,tmpFileName,tmpSize,tmpHash,tmpId),False)
else:
# Queue the file for download later.
log.log(3,"info",_("File does not exist or is not the correct size. Queueing for download. ") + tmpFileName,True)
tmpFile = XiboFile(tmpFileName,tmpHash,tmpId,tmpType)
md5Cache[tmpFileName] = tmpFile
self.dlQueue.put((tmpType,tmpFileName,tmpSize,tmpHash,tmpId),False)
except:
# TODO: Blacklist the media item.
log.log(0,"error",_("RequiredFiles XML error: File type=media has no path attribute or no size attribute. Blacklisting."),True)
log.log(5,"audit",_("File " + tmpFileName + " is valid."))
elif str(f.attributes['type'].value) == 'layout':
# It's a Layout node.
try:
tmpPath = os.path.join(config.get('Main','libraryDir'),str(f.attributes['path'].value) + '.xlf')
tmpFileName = str(f.attributes['path'].value) + '.xlf'
tmpHash = str(f.attributes['md5'].value)
tmpType = str(f.attributes['type'].value)
tmpId = int(f.attributes['id'].value)
if os.path.isfile(tmpPath):
# File exists
# See if we checksummed it recently
if tmpFileName in md5Cache:
# Check if the md5 cache is old for this file
if md5Cache[tmpFileName].isExpired():
# Update the cache if it is
md5Cache[tmpFileName].update()
# The file is in cache, but has changed hash on the server
if md5Cache[tmpFileName].targetHash != tmpHash:
md5Cache[tmpFileName].targetHash = tmpHash
md5Cache[tmpFileName].update()
if md5Cache[tmpFileName].md5 != tmpHash:
# The hashes don't match.
# Queue for download.
log.log(2,"warning",_("File exists and is the correct size, but the checksum is incorrect. Queueing for download. ") + tmpFileName,True)
self.dlQueue.put((tmpType,tmpFileName,0,tmpHash,tmpId),False)
else:
tmpFile = XiboFile(tmpFileName,tmpHash,tmpId,tmpType)
md5Cache[tmpFileName] = tmpFile
if not tmpFile.isValid():
# The hashes don't match.
# Queue for download.
log.log(2,"warning",_("File exists and is the correct size, but the checksum is incorrect. Queueing for download. ") + tmpFileName,True)
self.dlQueue.put((tmpType,tmpFileName,0,tmpHash,tmpId),False)
else:
# Queue the file for download later.
log.log(3,"info",_("File does not exist. Queueing for download. ") + tmpFileName,True)
tmpFile = XiboFile(tmpFileName,tmpHash,tmpId,tmpType)
md5Cache[tmpFileName] = tmpFile
self.dlQueue.put((tmpType,tmpFileName,0,tmpHash,tmpId),False)
except:
# TODO: Blacklist the media item.
log.log(0,"error",_("RequiredFiles XML error: File type=layout has no path attribute or no hash attribute. Blacklisting."),True)
elif str(f.attributes['type'].value) == 'resource':
# It's a Resource node.
try:
tmpPath = os.path.join(config.get('Main','libraryDir'),str(f.attributes['mediaid'].value) + '-cache.html')
tmpFileName = str(f.attributes['mediaid'].value) + '-cache.html'
tmpRegionId = str(f.attributes['regionid'].value)
tmpType = str(f.attributes['type'].value)
tmpLayoutId = int(f.attributes['layoutid'].value)
if os.path.isfile(tmpPath):
# File exists
# See if we checksummed it recently
if tmpFileName in md5Cache:
# Check if the md5 cache is old for this file
if md5Cache[tmpFileName].isExpired():
# Update the cache if it is
md5Cache[tmpFileName].update()
else:
tmpFile = XiboResourceFile(tmpFileName,tmpRegionId,tmpLayoutId,tmpType)
md5Cache[tmpFileName] = tmpFile
if not tmpFile.isValid():
# The hashes don't match.
# Queue for download.
log.log(2,"warning",_("File exists and is the correct size, but the checksum is incorrect. Queueing for download. ") + tmpFileName,True)
self.dlQueue.put((tmpType,tmpFileName,0,tmpRegionId,tmpLayoutId),False)
else:
# Queue the file for download later.
log.log(3,"info",_("File does not exist. Queueing for download. ") + tmpFileName,True)
tmpFile = XiboResourceFile(tmpFileName,tmpRegionId,tmpLayoutId,tmpType)
md5Cache[tmpFileName] = tmpFile
self.dlQueue.put((tmpType,tmpFileName,0,tmpRegionId,tmpLayoutId),False)
except:
# TODO: Blacklist the media item.
log.log(0,"error",_("RequiredFiles XML error: File type=resource has no layoutid attribute or no regionid attribute. Blacklisting."),True)
elif str(f.attributes['type'].value) == 'blacklist':
# It's a Blacklist node
#log.log(5,"info","Blacklist File Node found!")
# TODO: Do something with the blacklist
pass
else:
# Unknown node. Ignore
pass
fileNodes = None
# End If self.doc != None
self.updateInfo()
self.updateMediaInventory()
# Loop over the queue and download as required
try:
# Throttle this to a maximum number of dl threads.
while True:
tmpType, tmpFileName, tmpSize, tmpHash, tmpId = self.dlQueue.get(False)
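# Note: for 'resource' entries the hash slot carries the region id and the id slot the layout id (see the queue puts above).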
if config.get('Main','manualUpdate') == 'true':
log.lights('offlineUpdate','start')
# Check if the file is downloading already
if not tmpFileName in self.runningDownloads:
# Make a download thread and actually download the file.
# Add the running thread to the self.runningDownloads dictionary
self.runningDownloads[tmpFileName] = XiboDownloadThread(self,tmpType,tmpFileName,tmpSize,tmpHash,tmpId)
log.updateRunningDownloads(len(self.runningDownloads))
updatedContent = True
if self.offline:
# If we're running offline, block until completed.
self.runningDownloads[tmpFileName].run()
else:
self.runningDownloads[tmpFileName].start()
while len(self.runningDownloads) >= (self.maxDownloads - 1):
# There are no download thread slots free
# Sleep for 5 seconds and try again.
log.log(3,"info",_("All download slots filled. Waiting for a download slot to become free"))
time.sleep(5)
# End While
except Queue.Empty:
# Used to exit the above while once all items are downloaded.
pass
cacheXml = minidom.Document()
cacheXmlRoot = cacheXml.createElement("cache")
cacheXml.appendChild(cacheXmlRoot)
# Loop over the MD5 hash cache and remove any entries older than 1 hour
try:
# Iterate over a copy (items()) because expired entries are popped from md5Cache inside the loop.
for tmpFileName, tmpFile in md5Cache.items():
if tmpFile.isExpired() and (not tmpFileName in self.runningDownloads):
md5Cache.pop(tmpFileName)
# Prepare to cache out to file
tmpFileInfo = tmpFile.toTuple()
tmpNode = cacheXml.createElement("file")
tmpNode.setAttribute("name",tmpFileName)
tmpNode.setAttribute("md5",tmpFileInfo[2])
tmpNode.setAttribute("mtime",str(tmpFileInfo[4]))
tmpNode.setAttribute("id",str(tmpFileInfo[5]))
tmpNode.setAttribute("type",str(tmpFileInfo[6]))
cacheXmlRoot.appendChild(tmpNode)
except RuntimeError:
# Tried to remove something from cache that wasn't there?
# Shouldn't happen
# Log it and deal with it
log.log(1,"error",_("Attempted to remove %s from cache but an error occured") % tmpFileName)
# Write the cache out to disk
try:
f = open(os.path.join(config.get('Main','libraryDir'),'cache.xml'),'w')
f.write(cacheXml.toprettyxml())
f.close()
except IOError:
log.log(0,"error",_("Unable to write cache.xml"),True)
# Force the cache to unlink and recover the RAM associated with it
cacheXml.unlink()
# End Loop
# Update the infoscreen.
self.updateInfo()
self.updateMediaInventory()
# Cleanup old files
if self.cleanup:
self.cleanOldMedia()
log.log(5,"audit",_("There are ") + str(threading.activeCount()) + _(" running threads."))
if config.getboolean('Main','manualUpdate'):
time.sleep(5)
log.lights('offlineUpdate','finish')
else:
log.log(3,"audit",_("XiboDownloadManager: Sleeping") + " " + str(self.interval) + " " + _("seconds"))
self.p.enqueue('timer',(int(self.interval) * 1000,self.collect))
if config.getboolean('Main','interruptRunningMediaOnUpdate') and updatedContent:
# If there was new stuff downloaded and interruptRunningMediaOnUpdate is true,
# skip to next layout.
self.parent.currentLM.dispose()
if self.nextLayoutOnComplete:
self.p.parent.currentLM.dispose()
if self.chainScheduler:
self.p.parent.scheduler.collect(True)
self.__lock.acquire()
# End While
def collect(self,flag=False,chainScheduler=False):
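# If no downloads are running, release the semaphore so the run() loop can start another collection pass; otherwise retry in 60 seconds.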
if len(self.runningDownloads) == 0:
self.nextLayoutOnComplete = flag
self.chainScheduler = chainScheduler
self.__lock.release()
else:
self.p.enqueue('timer',(60000,self.collect))
def dlThreadCompleteNotify(self,tmpFileName):
# Download thread completed. Log and remove from
# self.runningDownloads
log.log(3,"info",_("Download thread completed for ") + tmpFileName, True)
del self.runningDownloads[tmpFileName]
log.updateRunningDownloads(len(self.runningDownloads))
# Update the infoscreen
self.updateInfo()
self.updateMediaInventory()
def updateInfo(self):
# Update the info screen with information about the media
# and its status
infoStr = ""
for tmpFileName, tmpFile in md5Cache.iteritems():
if tmpFile.isValid():
infoStr += tmpFileName + ", "
else:
infoStr += "<i>" + tmpFileName + "</i>, "
log.updateMedia(infoStr)
def updateMediaInventory(self):
# Silently return if in full offline mode
if config.getboolean('Main','manualUpdate'):
return
if not config.getboolean('Main','mediaInventory'):
return
# Get current md5Cache and send it back to the server
inventoryXml = minidom.Document()
inventoryXmlRoot = inventoryXml.createElement("files")
# Add the MAC address to the MediaInventory if possible
try:
inventoryXmlRoot.setAttribute("macAddress", self.xmds.getMac())
except:
pass
inventoryXml.appendChild(inventoryXmlRoot)
# Loop over the MD5 hash cache and build the inventory
try:
for tmpFileName, tmpFile in md5Cache.iteritems():
tmpFileInfo = tmpFile.toTuple()
tmpNode = inventoryXml.createElement("file")
if str(tmpFileInfo[6]) == 'resource':
tmpNode.setAttribute("regionid",str(tmpFileInfo[1]))
tmpNode.setAttribute("layoutid",str(tmpFileInfo[5]))
else:
tmpNode.setAttribute("md5",tmpFileInfo[1])
tmpNode.setAttribute("id",str(tmpFileInfo[5]))
# Convert unix timestamp to ISO format
tmpDt = datetime.datetime.fromtimestamp(tmpFileInfo[3])
tmpDt = tmpDt.strftime("%Y-%m-%d %H:%M:%S")
tmpNode.setAttribute("lastChecked",tmpDt)
tmpNode.setAttribute("type",str(tmpFileInfo[6]))
if tmpFile.isValid():
tmpNode.setAttribute("complete","1")
else:
tmpNode.setAttribute("complete","0")
inventoryXmlRoot.appendChild(tmpNode)
except:
log.log(0,'error',_('updateMediaInventory: Unknown error building inventoryXml'))
# Send via XMDS
try:
self.xmds.MediaInventory(inventoryXml.toprettyxml())
except XMDSException:
log.log(1,'error',_('Unable to send mediaInventory to the server via XMDS.'))
inventoryXml.unlink()
def cleanOldMedia(self):
# Check how recently we ran. Only run infrequently
now = time.time()
# Reserved files - never clean these:
reservedFiles = ['splash.jpg', '0.xlf',
'schedule.xml', 'rf.xml',
'cache.xml' ]
if now < self.lastCleanup + (60 * 60 * 18):
# Don't run cleanup this time
log.log(1,'info',_('CLEANUP: Skipping cleanup of media directory as we ran recently'))
return
self.lastCleanup = now
# Iterate over the media library and bin anything that has expired and is no longer in md5Cache
expireDays = config.getint('Main','mediaFileExpiry')
expireDT = now - (60 * 60 * 24 * expireDays)
libraryDir = config.get('Main','libraryDir')
log.log(1,'info',_('CLEANUP: Beginning cleanup of media directory'))
for fName in os.listdir(libraryDir):
if not os.path.isfile(os.path.join(libraryDir, fName)):
# Skip this item as it's not a file
log.log(8,'info',_('CLEANUP: Skipping %s as it\'s not a file') % fName)
continue
# Check if fName is in md5Cache
if fName in md5Cache:
# Skip this item as it's in use
log.log(8,'info',_('CLEANUP: Skipping %s as it\'s in use') % fName)
continue
if fName in reservedFiles:
# Skip files from the splash screen
log.log(8,'info',_('CLEANUP: Skipping %s as it\'s reserved or system') % fName)
continue
# Check if atime on the file is less than expireDT
try:
fAtime = os.path.getatime(os.path.join(libraryDir, fName))
except OSError:
# File must have vanished
# Skip it
log.log(8,'info',_('CLEANUP: Skipping %s as it seems to have vanished!') % fName)
continue
if fAtime < expireDT:
try:
os.remove(os.path.join(libraryDir, fName))
log.log(8,'info',_('CLEANUP: Deleted %s') % fName)
except:
log.log(0,'error',_('CLEANUP: Error deleting file %s from library') % fName)
else:
log.log(8,'info',_('CLEANUP: Skipping %s as it was accessed recently') % fName)
# Clean up the scaled directory too
for fName in os.listdir(os.path.join(libraryDir,'scaled')):
if not os.path.isfile(os.path.join(libraryDir, 'scaled', fName)):
# Skip this item as it's not a file
log.log(8,'info',_('CLEANUP: Skipping scaled/%s as it\'s not a file') % fName)
continue
# Check if atime on the file is less than expireDT
try:
fAtime = os.path.getatime(os.path.join(libraryDir, 'scaled', fName))
except OSError:
# File must have vanished
# Skip it
log.log(8,'info',_('CLEANUP: Skipping scaled/%s as it seems to have vanished!') % fName)
continue
if fAtime < expireDT:
try:
os.remove(os.path.join(libraryDir, 'scaled', fName))
log.log(8,'info',_('CLEANUP: Deleted scaled/%s') % fName)
except:
log.log(0,'error',_('CLEANUP: Error deleting file scaled/%s from library') % fName)
else:
log.log(8,'info',_('CLEANUP: Skipping scaled/%s as it was accessed recently') % fName)
log.log(1,'info',_('CLEANUP: Finished cleanup of media directory'))
class XiboDownloadThread(Thread):
def __init__(self,parent,tmpType,tmpFileName,tmpSize,tmpHash,tmpId):
Thread.__init__(self)
self.tmpType = tmpType
self.tmpId = tmpId
self.tmpFileName = tmpFileName
self.tmpPath = os.path.join(config.get('Main','libraryDir'),self.tmpFileName)
self.tmpSize = tmpSize
self.tmpHash = tmpHash
self.parent = parent
self.offset = long(0)
self.chunk = 512000
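# Media files are fetched from XMDS in 512000-byte chunks, resuming from any partial file already on disk when resumeDownloads is enabled.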
self.resumeDownloads = config.getboolean('Main','resumeDownloads')
# Server versions prior to 1.0.5 send an invalid md5sum for layouts that require
# the client to add a newline character to the returned layout to make it validate
# Should the client assume the server is pre-1.0.5?
try:
self.backCompat = config.getboolean('Main','backCompatLayoutChecksums')
except:
self.backCompat = False
def run(self):
# Manage downloading the appropriate type of file:
if self.tmpType == "media":
self.downloadMedia()
elif self.tmpType == "layout":
self.downloadLayout()
elif self.tmpType == "resource":
self.downloadResource()
# Let the DownloadManager know we're complete
self.parent.dlThreadCompleteNotify(self.tmpFileName)
def downloadMedia(self):
# Actually download the Media file
finished = False
tries = 0
if not self.resumeDownloads:
if os.path.isfile(self.tmpPath):
try:
log.log(5,"debug",_("Removing invalid file - resume downloads disabled: %s" % self.tmpPath), True)
os.remove(self.tmpPath)
except:
log.log(0,"error",_("Unable to delete file: ") + self.tmpPath, True)
try:
# See if file is already bigger than the target size.
# Bin it if it is
self.offset = long(os.path.getsize(self.tmpPath))
if self.offset >= self.tmpSize:
try:
log.log(5,"debug",_("Removing invalid file - too large: %s" % self.tmpPath), True)
os.remove(self.tmpPath)
except:
log.log(0,"error",_("Unable to delete file: ") + self.tmpPath, True)
self.offset = long(0)
except:
# File doesn't exist. Go for 0 offset
self.offset = long(0)
fh = None
try:
fh = open(self.tmpPath, 'ab')
except:
log.log(0,"error",_("Unable to write file: ") + self.tmpPath, True)
return
while tries < 5 and not finished:
tries = tries + 1
failCounter = 0
while self.offset < self.tmpSize and failCounter < 3:
# If downloading this chunk will complete the file
# work out exactly how much to download this time
if self.offset + self.chunk > self.tmpSize:
self.chunk = self.tmpSize - self.offset
try:
response = self.parent.xmds.GetFile(self.tmpFileName,self.tmpType,self.offset,self.chunk)
fh.write(response)
fh.flush()
self.offset = self.offset + self.chunk
failCounter = 0
except RuntimeError:
# TODO: Do something sensible
pass
except XMDSException:
# TODO: Do something sensible
failCounter = failCounter + 1
except ValueError:
finished = True
break
# End while offset<tmpSize
try:
fh.close()
except:
# TODO: Do something sensible
pass
# Check size/md5 here
tmpFile = XiboFile(self.tmpFileName,self.tmpHash,self.tmpId,self.tmpType)
if tmpFile.isValid():
finished = True
md5Cache[self.tmpFileName] = tmpFile
else:
try:
# Only delete the file at this point if the file got to full size.
# If not leave it in place for next run.
if self.offset == self.tmpSize:
log.log(5,"audit",_("Removing invalid file - checksum didn't match after download: %s" % self.tmpPath), True)
os.remove(self.tmpPath)
except:
log.log(0,"error",_("Unable to delete file: ") + self.tmpPath, True)
# End while
def downloadLayout(self):
# Actually download the Layout file
finished = False
tries = 0
if os.path.isfile(self.tmpPath):
try:
os.remove(self.tmpPath)
except:
log.log(0,"error",_("Unable to delete file: ") + self.tmpPath, True)
return
while tries < 5 and not finished:
tries = tries + 1
fh = None
try:
fh = open(self.tmpPath, 'wb')
except:
log.log(0,"error",_("Unable to write file: ") + self.tmpPath, True)
return
try:
response = self.parent.xmds.GetFile(self.tmpFileName,self.tmpType,0,0)
if self.backCompat:
fh.write(response + '\n')
else:
fh.write(response)
fh.flush()
except RuntimeError:
# TODO: Do something sensible
pass
except XMDSException:
# TODO: Do we need to do anything here?
pass
try:
fh.close()
except:
# TODO: Do something sensible
pass
# Check size/md5 here
tmpFile = XiboFile(self.tmpFileName,self.tmpHash,self.tmpId,self.tmpType)
if tmpFile.isValid():
finished = True
md5Cache[self.tmpFileName] = tmpFile
else:
log.log(4,"warning",_("File completed downloading but MD5 did not match.") + self.tmpFileName, True)
# End while
def downloadResource(self):
# Actually download the Resource file
finished = False
tries = 0
self.tmpMediaId = self.tmpFileName.replace('-cache.html','')
if os.path.isfile(self.tmpPath):
try:
os.remove(self.tmpPath)
except:
log.log(0,"error",_("Unable to delete file: ") + self.tmpPath, True)
return
while tries < 5 and not finished:
tries = tries + 1
fh = None
try:
fh = open(self.tmpPath, 'wb')
except:
log.log(0,"error",_("Unable to write file: ") + self.tmpPath, True)
return
try:
response = self.parent.xmds.GetResource(self.tmpId,self.tmpHash,self.tmpMediaId)
fh.write(response)
fh.flush()
except RuntimeError:
# TODO: Do something sensible
pass
except XMDSException:
# TODO: Do we need to do anything here?
pass
try:
fh.close()
except:
# TODO: Do something sensible
pass
# Check size/md5 here
tmpFile = XiboResourceFile(self.tmpFileName,self.tmpHash,self.tmpId,self.tmpType)
if tmpFile.isValid():
finished = True
md5Cache[self.tmpFileName] = tmpFile
else:
log.log(4,"warning",_("File completed downloading but MD5 did not match.") + self.tmpFileName, True)
# End while
#### Finish Download Manager
#### Layout/Region Management
class XiboLayoutManager(Thread):
def __init__(self,parent,player,layout,zindex=0,opacity=1.0,hold=False):
log.log(3,"info",_("New XiboLayoutManager instance created."))
self.p = player
self.l = layout
self.zindex = zindex
self.parent = parent
self.opacity = opacity
self.regions = []
self.layoutNodeName = None
self.layoutNodeNameExt = "-" + str(self.p.nextUniqueId())
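# The unique suffix lets two instances of the same layout coexist on screen during a transition.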
self.layoutExpired = False
self.isPlaying = False
self.hold = hold
self.__regionLock = Semaphore()
self.__regionDisposeLock = Semaphore()
self.expiring = False
self.nextLayoutTriggered = False
Thread.__init__(self)
def run(self):
self.isPlaying = True
log.log(6,"info",_("%s XiboLayoutManager instance running.") % self.l.layoutID)
# Add a DIV to contain the whole layout (for transitioning whole layouts in to one another)
# TODO: Take account of the zindex parameter for transitions. Should this layout sit on top or underneath?
# Ensure that the layoutNodeName is unique on the player (in case we have to transition to ourselves)
self.layoutNodeName = 'L' + str(self.l.layoutID) + self.layoutNodeNameExt
# Create the XML that will render the layoutNode.
tmpXML = '<div id="' + self.layoutNodeName + '" width="' + str(self.l.sWidth) + '" height="' + str(self.l.sHeight) + '" x="' + str(self.l.offsetX) + '" y="' + str(self.l.offsetY) + '" opacity="' + str(self.opacity) + '" crop="False" />'
self.p.enqueue('add',(tmpXML,'screen'))
# Add a ColorNode and maybe ImageNode to the layout div to draw the background
# This code will work with libavg > 0.8.x
try:
tmpXML = '<rect fillopacity="1" fillcolor="%s" color="%s" size="(%d,%d)" id="bgColor%s" />' % (self.l.backgroundColour.strip("#"),self.l.backgroundColour.strip("#"),self.l.sWidth,self.l.sHeight,self.layoutNodeNameExt)
self.p.enqueue('add',(tmpXML,self.layoutNodeName))
except AttributeError:
# The background colour isn't set for the layout.
# This is likely to be bad news as the XLF is already invalid.
# Log this, sleep and then load a different layout.
log.log(0,'error',_("Layout %s is invalid or corrupt. No background colour specified in the XLF. Skipping.") % self.l.layoutID)
time.sleep(5)
self.parent.nextLayout()
return
if self.l.backgroundImage != None:
# If there's a backgroud image, scale it to preserve texture memory
# If lowTextureMemory is true (eg on Intel Graphics Cards), use Thumbnail to
# produce much smaller image sizes.
if config.get('Main','lowTextureMemory') == "true":
w = int((self.l.sWidth + 1) * 1.1)
h = int((self.l.sHeight + 1) * 1.1)
else:
w = int(self.l.sWidth + 1)
h = int(self.l.sHeight + 1)
fName = os.path.join(config.get('Main','libraryDir'),self.l.backgroundImage)
thumb = os.path.join(config.get('Main','libraryDir'),'scaled',self.l.backgroundImage) + "-%dx%d" % (w,h)
if not os.path.exists(thumb) or (os.path.getmtime(thumb) < os.path.getmtime(fName)):
log.log(3,'info',_("%s: Resizing image %s to %dx%d") % (self.layoutNodeName,fName,w,h))
image = PIL.Image.open(fName)
if image.size == (w,h):
shutil.copyfile(fName, thumb)
else:
if config.get('Main','lowTextureMemory') == "true":
image.thumbnail((w,h),PIL.Image.ANTIALIAS)
else:
image = image.resize((w,h),PIL.Image.ANTIALIAS)
image.save(thumb, image.format, quality=95)
del image
tmpXML = str('<image width="%d" height="%d" id="bg%s" opacity="1.0" />' % (self.l.sWidth,self.l.sHeight,self.layoutNodeNameExt))
self.p.enqueue('add',(tmpXML,self.layoutNodeName))
bitmap = avg.Bitmap(thumb)
self.p.enqueue('setBitmap',("bg%s" % self.layoutNodeNameExt, bitmap))
# Break layout in to regions
# Spawn a region manager for each region and then start them all running
# Log each region in an array for checking later.
for cn in self.l.children():
if cn.nodeType == cn.ELEMENT_NODE and cn.localName == "region":
# Create a new Region Manager Thread and kick it running.
# Pass in cn since it contains the XML for the whole region
# TODO: Instead of starting here, we need to sort the regions array by zindex attribute
# then start in ascending order to ensure rendering happens in layers correctly.
tmpRegion = XiboRegionManager(self, self.p, self.layoutNodeName, self.layoutNodeNameExt, cn)
log.log(2,"info",_("XiboLayoutManager: run() -> Starting new XiboRegionManager."))
tmpRegion.start()
# Store a reference to the region so we can talk to it later
self.regions.append(tmpRegion)
def regionElapsed(self):
log.log(2,"info",_("%s Region elapsed. Checking if layout has elapsed") % self.layoutNodeName)
allExpired = True
for i in self.regions:
if i.regionExpired == False:
log.log(3,"info",_("%s Region " + i.regionNodeName + " has not expired. Waiting") % self.layoutNodeName)
allExpired = False
return False
self.__regionLock.acquire()
if allExpired and not self.expiring:
log.log(2,"info",_("%s All regions have expired. Marking layout as expired") % self.layoutNodeName, True)
if self.hold:
log.log(1,"info",_("Holding the splash screen until we're told otherwise"), True)
self.__regionLock.release()
return False
self.layoutExpired = True
self.expiring = True
# Enqueue region exit transitions by calling the dispose method on each regionManager
for i in self.regions:
i.dispose()
self.__regionLock.release()
return True
else:
self.__regionLock.release()
if allExpired:
return True
return False
def regionDisposed(self):
log.log(2,"info",_("%s Region disposed. Checking if all regions have disposed") % self.layoutNodeName)
allExpired = True
for i in self.regions:
if i.disposed == False:
log.log(3,"info",_("%s Region %s has not disposed. Waiting") % (self.layoutNodeName,i.regionNodeName))
allExpired = False
self.__regionDisposeLock.acquire()
if allExpired == True and not self.nextLayoutTriggered:
log.log(2,"info",_("%s All regions have disposed. Marking layout as disposed") % self.layoutNodeName, True)
self.layoutDisposed = True
log.log(2,"info",_("LayoutManager->parent->nextLayout()"))
self.nextLayoutTriggered = True
self.parent.nextLayout()
self.__regionDisposeLock.release()
def dispose(self):
# Enqueue region exit transitions by calling the dispose method on each regionManager
for i in self.regions:
i.dispose()
# TODO: Remove this? The exiting layout should be left for a transition object to transition with.
# Leaving in place for testing though.
# self.p.enqueue("reset","")
class XiboRegionManager(Thread):
class ConcurrencyManager:
def __init__(self,parent):
self.parent = parent
self.done = False
def next(self):
if not self.done:
self.done = True
self.parent.next()
def getConcurrencyManager(self):
return self.ConcurrencyManager(self)
def __init__(self,parent,player,layoutNodeName,layoutNodeNameExt,cn):
log.log(3,"info",_("New XiboRegionManager instance created."))
Thread.__init__(self)
# Semaphore used to block this thread's execution once it has passed control off to the Media thread.
# Lock is released by a callback from the libavg player (which returns control to this thread such that the
# player thread never blocks.
self.lock = Semaphore()
self.tLock = Semaphore()
# Variables
self.p = player
self.parent = parent
self.regionNode = cn
self.layoutNodeName = layoutNodeName
self.layoutNodeNameExt = layoutNodeNameExt
self.regionExpired = False
self.regionNodeNameExt = "-" + str(self.p.nextUniqueId())
self.regionNodeName = None
self.width = None
self.height = None
self.top = None
self.left = None
self.zindex = None
self.disposed = False
self.disposing = False
self.oneItemOnly = False
self.previousMedia = None
self.currentMedia = None
self.regionId = None
self.numNodes = 0
self.textErrorFlag = False
# Calculate the region ID name
try:
self.regionNodeName = "R" + str(self.regionNode.attributes['id'].value) + self.regionNodeNameExt
self.regionId = str(self.regionNode.attributes['id'].value)
except KeyError:
log.log(1,"error",_("Region XLF is invalid. Missing required id attribute"), True)
self.regionExpired = True
self.parent.regionElapsed()
return
# Calculate the region width
try:
self.width = float(self.regionNode.attributes['width'].value) * parent.l.scaleFactor
self.originalWidth = self.regionNode.attributes['width'].value
except KeyError:
log.log(1,"error",_("Region XLF is invalid. Missing required width attribute"), True)
self.regionExpired = True
self.parent.regionElapsed()
return
# Calculate the region height
try:
self.height = float(self.regionNode.attributes['height'].value) * parent.l.scaleFactor
self.originalHeight = self.regionNode.attributes['height'].value
except KeyError:
log.log(1,"error",_("Region XLF is invalid. Missing required height attribute"), True)
self.regionExpired = True
self.parent.regionElapsed()
return
# Calculate the region top
try:
self.top = float(self.regionNode.attributes['top'].value) * parent.l.scaleFactor
except KeyError:
log.log(1,"error",_("Region XLF is invalid. Missing required top attribute"), True)
self.regionExpired = True
self.parent.regionElapsed()
return
# Calculate the region left
try:
self.left = float(self.regionNode.attributes['left'].value) * parent.l.scaleFactor
except KeyError:
log.log(1,"error",_("Region XLF is invalid. Missing required left attribute"))
self.regionExpired = True
self.parent.regionElapsed()
return
# Get region zindex
try:
self.zindex = int(float(self.regionNode.attributes['zindex'].value))
except KeyError:
self.zindex = 1
# Work out how many media nodes there are
for cn in self.regionNode.childNodes:
self.numNodes += 1
# Create a div for the region and add it
tmpXML = '<div id="' + self.regionNodeName + '" width="' + str(self.width) + '" height="' + str(self.height) + '" x="' + str(self.left) + '" y="' + str(self.top) + '" opacity="1.0" crop="True" />'
self.p.enqueue('add',(tmpXML,self.layoutNodeName))
def run(self):
self.lock.acquire()
self.tLock.acquire()
log.log(3,"info",_("New XiboRegionManager instance running for region:") + self.regionNodeName)
# * Iterate through the media items
# -> For each media, display on screen and set a timer to cause the next item to be shown
# -> attempt to acquire self.lock - which will block this thread. We will be woken by the callback
# to next() by the libavg player.
# * When all items complete, mark region complete by setting regionExpired = True and calling parent.regionElapsed()
mediaCount = 0
while self.disposed == False and self.oneItemOnly == False and self.disposing == False:
for cn in self.regionNode.childNodes:
if cn.nodeType == cn.ELEMENT_NODE and cn.localName == "media":
log.log(3,"info","%s: Moving to next Media item" % self.regionNodeName)
mediaCount = mediaCount + 1
if self.disposed == False and self.disposing == False:
type = str(cn.attributes['type'].value)
type = type[0:1].upper() + type[1:]
log.log(4,"info","%s: Media is of type: %s" % (self.regionNodeName,type))
try:
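# Media plugins are loaded by convention: a media node of type 'foo' maps to class FooMedia
# in plugins.media.FooMedia. Effects and transitions below follow the same naming pattern.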
import plugins.media
__import__("plugins.media." + type + "Media",None,None,[''])
self.currentMedia = eval("plugins.media." + type + "Media." + type + "Media")(log,config,self,self.p,cn)
# Apply (multiple or none) media effects here
import plugins.effects
tmpEffects = []
for cn in self.currentMedia.effects:
eType = str(cn.attributes['type'].value)
eType = eType[0:1].upper() + eType[1:]
__import__("plugins.effects." + eType + "Effect",None,None,[''])
tmpE = eval("plugins.effects." + eType + "Effect." + eType + "Effect")(log,self.p,self.currentMedia.mediaNodeName,cn)
tmpEffects.append(tmpE)
# Transition between media here...
import plugins.transitions
try:
tmp1 = str(self.previousMedia.options['transOut'])
tmp1 = tmp1[0:1].upper() + tmp1[1:]
except:
tmp1 = ""
try:
tmp2 = str(self.currentMedia.options['transIn'])
tmp2 = tmp2[0:1].upper() + tmp2[1:]
except:
tmp2 = ""
trans = (tmp1,tmp2)
log.log(3,"info",self.regionNodeName + ": " + _("Beginning transitions: " + str(trans)))
# The two transitions match. Let one plugin handle both.
if (trans[0] == trans[1]) and trans[0] != "":
self.currentMedia.add()
for e in tmpEffects:
e.start()
try:
__import__("plugins.transitions." + trans[0] + "Transition",None,None,[''])
tmpTransition = eval("plugins.transitions." + trans[0] + "Transition." + trans[0] + "Transition")(log,self.p,self.previousMedia,self.currentMedia,self.tNext)
tmpTransition.start()
except ImportError:
__import__("plugins.transitions.DefaultTransition",None,None,[''])
tmpTransition = plugins.transitions.DefaultTransition.DefaultTransition(log,self.p,self.previousMedia,self.currentMedia,self.tNext)
tmpTransition.start()
self.tLock.acquire()
else:
# The two transitions don't match.
# Create two transition plugins and run them sequentially.
if (trans[0] != ""):
try:
__import__("plugins.transitions." + trans[0] + "Transition",None,None,[''])
tmpTransition = eval("plugins.transitions." + trans[0] + "Transition." + trans[0] + "Transition")(log,self.p,self.previousMedia,None,self.tNext)
tmpTransition.start()
except ImportError:
__import__("plugins.transitions.DefaultTransition",None,None,[''])
tmpTransition = plugins.transitions.DefaultTransition.DefaultTransition(log,self.p,self.previousMedia,None,self.tNext)
tmpTransition.start()
self.tLock.acquire()
if (trans[1] != ""):
self.currentMedia.add()
for e in tmpEffects:
e.start()
try:
__import__("plugins.transitions." + trans[1] + "Transition",None,None,[''])
tmpTransition = eval("plugins.transitions." + trans[1] + "Transition." + trans[1] + "Transition")(log,self.p,None,self.currentMedia,self.tNext)
tmpTransition.start()
except ImportError:
__import__("plugins.transitions.DefaultTransition",None,None,[''])
tmpTransition = plugins.transitions.DefaultTransition.DefaultTransition(log,self.p,None,self.currentMedia,self.tNext)
tmpTransition.start()
self.tLock.acquire()
else:
self.currentMedia.add()
self.currentMedia.start()
for e in tmpEffects:
e.start()
# Cleanup
try:
# TODO: I removed an if self.disposing == False: here
# I _think_ this was just me being paranoid on getting rid of exceptions thrown by the player
# but it's more important that the media node knows it has disposed for stats generation.
# Tell the media node to dispose itself.
self.previousMedia.dispose()
self.tLock.acquire()
except AttributeError:
pass
if self.disposing == False and self.disposed == False:
# Wait for the new media to finish
self.lock.acquire()
self.previousMedia = self.currentMedia
self.currentMedia = None
except ImportError as detail:
log.log(0,"error","Missing media plugin for media type " + type + ": " + str(detail), True)
# TODO: Do something with this layout? Blacklist?
self.lock.release()
# If there's no items, pause for a while to allow other RegionManagers to get up and running.
if mediaCount == 0:
self.oneItemOnly = True
log.log(3,"info",_("Region has no media: ") + self.regionNodeName)
time.sleep(2)
self.regionExpired = True
# print str(self.regionNodeName) + " has expired"
if self.parent.regionElapsed():
# If regionElapsed returns True, then the layout is on its way out so stop looping
# Achieved by pretending to be a single-item region
self.oneItemOnly = True
# If there's only one item, render it and leave it alone!
if mediaCount == 1:
if not self.textErrorFlag:
self.oneItemOnly = True
log.log(3,"info",_("Region has only one media: ") + self.regionNodeName)
self.textErrorFlag = False
# End while loop
def next(self):
# Release the lock semaphore so that the run() method of the thread can continue.
# Called by a callback from libavg
# log.log(3,"info",_("XiboRegionManager") + " " + self.regionNodeName + ": " + _("Next Media Item"))
# Do nothing if the layout has already been removed from the screen
if self.disposed == True or self.disposing == True:
return
self.lock.release()
def tNext(self):
if self.disposed == True:
return
self.tLock.release()
def textError(self):
# Flag that the text rendering for the child media failed
self.textErrorFlag = True
def dispose(self):
self.disposing = True
log.log(5,"info",self.regionNodeName + " is disposing.")
rOptions = {}
oNode = None
# Perform any region exit transitions
for cn in self.regionNode.childNodes:
if cn.nodeType == cn.ELEMENT_NODE and cn.localName == "options":
oNode = cn
try:
for cn in oNode.childNodes:
if not cn.localName is None:
if len(cn.childNodes) > 0:
rOptions[str(cn.localName)] = cn.childNodes[0].nodeValue
log.log(5,"info","Region Options: " + str(cn.localName) + " -> " + str(cn.childNodes[0].nodeValue))
else:
rOptions[str(cn.localName)] = ""
except AttributeError:
rOptions["transOut"] = ""
# Make the transition objects and pass in options
# Once animation complete, they should call back to self.disposeTransitionComplete()
transOut = str(rOptions["transOut"])
if (transOut != ""):
import plugins.transitions
transOut = transOut[0:1].upper() + transOut[1:]
log.log(5,"info",self.regionNodeName + " starting exit transition")
try:
__import__("plugins.transitions." + transOut + "Transition",None,None,[''])
tmpTransition = eval("plugins.transitions." + transOut + "Transition." + transOut + "Transition")(log,self.p,self.previousMedia,None,self.disposeTransitionComplete,rOptions,None)
tmpTransition.start()
log.log(5,"info",self.regionNodeName + " control passed to Transition object.")
except ImportError as detail:
log.log(3,"error",self.regionNodeName + ": Unable to import requested Transition plugin. " + str(detail), True)
self.disposeTransitionComplete()
else:
self.disposeTransitionComplete()
def disposeTransitionComplete(self):
# Notify the LayoutManager when these are complete.
log.log(5,"info",self.regionNodeName + " is disposed.")
self.disposed = True
self.parent.regionDisposed()
# Unlock the media loop and allow it to complete.
self.lock.release()
self.tLock.release()
#### Finish Layout/Region Managment
#### Scheduler Classes
class XiboLayout:
def __init__(self,layoutID,isDefault):
self.layoutID = layoutID
self.isDefault = isDefault
self.__mtime = 0
self.schedule = []
self.__setup()
def __setup(self):
self.builtWithNoXLF = False
self.layoutNode = None
self.iter = None
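# Prefer the configured virtual window size (vwidth/vheight) when it is
# non-zero; otherwise fall back to the player's real width/height.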
if not int(config.get('Main','vwidth')) == 0:
self.playerWidth = int(config.get('Main','vwidth'))
self.playerHeight = int(config.get('Main','vheight'))
else:
self.playerWidth = int(config.get('Main','width'))
self.playerHeight = int(config.get('Main','height'))
# Attributes
self.width = None
self.height = None
self.sWidth = None
self.sHeight = None
self.offsetX = 0
self.offsetY = 0
self.scaleFactor = 1
self.backgroundImage = None
self.backgroundColour = None
# Tags assigned to this layout
self.tags = []
# Array of media names (to check against md5Cache later!)
self.media = []
# Checks
self.schemaCheck = False
self.mediaCheck = False
self.scheduleCheck = False
self.pluginCheck = True
if self.layoutID == "0":
try:
if not os.path.isfile(os.path.join(config.get('Main','libraryDir'),'0.xlf')):
import shutil
shutil.copy(os.path.join('resources','0.xlf'),config.get('Main','libraryDir'))
if not os.path.isfile(os.path.join(config.get('Main','libraryDir'),'splash.jpg')):
import shutil
shutil.copy(os.path.join('resources','splash.jpg'),config.get('Main','libraryDir'))
except IOError:
log.log(0,"error",_("Unable to write to libraryDir %s") % config.get('Main','libraryDir'), True)
# Read XLF from file (if it exists)
# Set builtWithNoXLF = True if it doesn't
try:
log.log(3,"info",_("Loading layout ID") + " " + self.layoutID + " " + _("from file") + " " + config.get('Main','libraryDir') + os.sep + self.layoutID + '.xlf')
self.doc = minidom.parse(config.get('Main','libraryDir') + os.sep + self.layoutID + '.xlf')
self.__mtime = os.path.getmtime(config.get('Main','libraryDir') + os.sep + self.layoutID + '.xlf')
# Find the layout node and store it
for e in self.doc.childNodes:
if e.nodeType == e.ELEMENT_NODE and e.localName == "layout":
from __future__ import absolute_import, print_function, division
"""
Tensor optimizations addressing the ops in basic.py.
"""
# TODO: intelligent merge for mul/add
# TODO: 0*x -> 0
from collections import defaultdict
import logging
import itertools
import operator
import sys
import time
import traceback
import warnings
import numpy
from six import integer_types, iteritems
from six.moves import reduce, xrange
import theano
from theano import gof
from theano.compat import izip
from theano.gof import opt, InconsistencyError, TopoOptimizer, graph
from theano.gof import Variable, Constant
from theano.gof.opt import copy_stack_trace, in2out
from theano.gof.utils import MethodNotDefined
from theano.gradient import DisconnectedType
from theano.configparser import config
from theano.tensor.elemwise import Elemwise, DimShuffle
from theano.tensor.subtensor import (get_idx_list, get_canonical_form_slice,
Subtensor, IncSubtensor, make_constant,
AdvancedIncSubtensor1,
AdvancedIncSubtensor,
AdvancedSubtensor1,
advanced_subtensor,
advanced_subtensor1,
advanced_inc_subtensor1)
from theano import scalar
from theano.scalar import basic
from theano.tensor import basic as T
from theano import compile # to register the optimizer built by this file
from theano.compile.ops import Shape, Shape_i
from theano.tensor.type import (values_eq_approx_remove_inf,
values_eq_approx_remove_nan,
values_eq_approx_remove_inf_nan)
from theano.gof.opt import (Optimizer, pre_constant_merge,
pre_greedy_local_optimizer)
from theano.gof import toolbox
from theano.tensor.basic import (Alloc, get_scalar_constant_value, ShapeError,
extract_constant, NotScalarConstantError,
Reshape)
from six import StringIO
_logger = logging.getLogger('theano.tensor.opt')
# Utilities
def _fill_chain(new_out, orig_inputs):
for i in orig_inputs:
new_out = T.fill(i, new_out)
return [new_out]
def encompasses_broadcastable(b1, b2):
"""
Parameters
----------
b1
The broadcastable attribute of a tensor type.
b2
The broadcastable attribute of a tensor type.
Returns
-------
bool
True if the broadcastable patterns b1 and b2 are such that b2 is
broadcasted to b1's shape and not the opposite.
"""
if len(b1) < len(b2):
return False
b1 = b1[-len(b2):]
return not any(v1 and not v2 for v1, v2 in zip(b1, b2))
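# Illustrative values (a sketch, not from the original module):
#   encompasses_broadcastable((False, False), (True,))        -> True
#     (the 1-d broadcastable pattern (True,) can be broadcast up to a
#      general matrix)
#   encompasses_broadcastable((True, False), (False, False))  -> False
#     (b1 is broadcastable in dim 0 but b2 is not, so b2 cannot be
#      broadcast to b1's shape)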
def merge_broadcastables(broadcastables):
return [all(bcast) for bcast in zip(*broadcastables)]
def scalarconsts_rest(inputs, elemwise=True, only_process_constants=False):
"""Partition a list of variables into two kinds:
scalar constants, and the rest."""
consts = []
origconsts = []
nonconsts = []
for i in inputs:
try:
v = get_scalar_constant_value(i, elemwise=elemwise,
only_process_constants=only_process_constants)
consts.append(v)
origconsts.append(i)
except NotScalarConstantError:
nonconsts.append(i)
return consts, origconsts, nonconsts
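# Illustrative sketch (not from the original module): roughly,
#   scalarconsts_rest([T.constant(2.0), x, T.constant(3.0)])
# returns ([2.0, 3.0], [<the two constant variables>], [x]), i.e. the
# extracted scalar values, the originating constants, and the remaining
# non-constant inputs, in that order.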
def broadcast_like(value, template, fgraph, dtype=None):
"""
Return a Variable with the same shape and dtype as the template,
filled by broadcasting value through it. `value` will be cast as
necessary.
"""
value = T.as_tensor_variable(value)
if value.type == template.type:
return value
if template not in fgraph.variables:
raise NotImplementedError('broadcast_like currently requires the '
'template Variable to be in the fgraph already')
if dtype is None:
dtype = template.dtype
value = T.cast(value, dtype)
if value.type == template.type:
return value
if hasattr(fgraph, 'shape_feature'):
new_shape = fgraph.shape_feature.shape_of[template]
else:
new_shape = template.shape
rval = T.alloc(value, *new_shape)
# the template may have 1s in its shape without being broadcastable
if rval.broadcastable != template.broadcastable:
rval = T.unbroadcast(rval, *[i for i in xrange(rval.ndim)
if rval.broadcastable[i] and
not template.broadcastable[i]])
assert rval.type.dtype == dtype
if rval.type.broadcastable != template.broadcastable:
raise AssertionError("rval.type.broadcastable is " +
str(rval.type.broadcastable) +
" but template.broadcastable is" +
str(template.broadcastable))
return rval
class InplaceElemwiseOptimizer(Optimizer):
"""
We parametrise it to make it work for Elemwise and GpuElemwise op.
"""
def __init__(self, OP):
self.op = OP
def add_requirements(self, fgraph):
fgraph.attach_feature(theano.gof.destroyhandler.DestroyHandler())
@staticmethod
def print_profile(stream, prof, level=0):
blanc = (' ' * level)
print(blanc, "InplaceElemwiseOptimizer ", prof['opt'].op, file=stream)
for k in ['node_before',
'nb_call_replace',
'nb_call_validate',
'nb_inconsistent']:
print(blanc, k, prof[k], file=stream)
ndim = prof['ndim']
if ndim:
print(blanc, "ndim", "nb", file=stream)
for n in sorted(ndim.keys()):
print(blanc, n, ndim[n], file=stream)
def apply(self, fgraph):
"""
Usage: InplaceElemwiseOptimizer(op).optimize(fgraph)
Attempts to replace all Broadcast ops by versions of them
that operate inplace. It operates greedily: for each Broadcast
Op that is encountered, for each output, it tries each input to
see if it can operate inplace on that input. If so, it makes the
change and goes to the next output or Broadcast Op.
Examples
--------
`x + y + z -> x += y += z`
`(x + y) * (x * y) -> (x += y) *= (x * y) or (x + y) *= (x *= y)`
"""
# We should not validate too often as this takes too much time to
# execute!
# It is the _dfs_toposort() fct in theano/gof/destroyhandler.py
# that takes so much time.
# Should we try to use another lib that does toposort?
# igraph: http://igraph.sourceforge.net/
# networkx: https://networkx.lanl.gov/
# Should we try to use cython?
# Compiling only that fct is not enough, should we try to add the
# deque class too?
# And init the deque and other list to an upper bound number of
# elements?
# Maybe Theano should do online toposort as in
# http://code.google.com/p/acyclic
#
# The next longest optimizer is the canonizer phase.
# Then I think it is the [io_?]toposort (need to validate) so check if
# the solution is also applicable there.
# We execute `validate` after this number of change.
prof = {'opt': self,
'node_before': len(fgraph.apply_nodes),
'nb_call_replace': 0,
'nb_call_validate': 0,
'nb_inconsistent': 0,
'ndim': defaultdict(lambda: 0)}
check_each_change = config.tensor.insert_inplace_optimizer_validate_nb
if check_each_change == -1:
if len(fgraph.apply_nodes) > 500:
check_each_change = 10
else:
check_each_change = 1
nb_change_no_validate = 0
chk = fgraph.checkpoint()
if fgraph.update_mapping:
update_outs = [fgraph.outputs[i] for i in fgraph.update_mapping]
else:
update_outs = []
protected_inputs = [
f.protected for f in fgraph._features if
isinstance(f, theano.compile.function_module.Supervisor)]
protected_inputs = sum(protected_inputs, []) # flatten the list
protected_inputs.extend(fgraph.outputs)
for node in list(graph.io_toposort(fgraph.inputs, fgraph.outputs)):
op = node.op
# gpuarray GpuElemwise inherit from Elemwise
if not type(op) == self.op:
continue
# If big graph and the outputs are scalar, do not make it
# inplace.
if (check_each_change != 1 and
# If multiple outputs, they must all have the same size,
# so only check the first.
getattr(node.outputs[0].type, 'ndim', -1) == 0):
continue
if op.inplace_pattern:
# Maybe this isn't needed anymore, but I don't want to
# risk a regression now. This case only happens if the
# original node already has some inplace pattern and we
# still try to add more patterns.
baseline = op.inplace_pattern
candidate_outputs = [i for i in xrange(len(node.outputs))
if i not in baseline]
# node inputs that are Constant, already destroyed,
# or fgraph protected inputs and fgraph outputs can't be used as
# inplace target.
# Remove here as faster.
candidate_inputs = [i for i in xrange(len(node.inputs))
if i not in baseline.values() and
not isinstance(node.inputs[i], Constant) and
# Is next line costly?
not fgraph.destroyers(node.inputs[i]) and
node.inputs[i] not in protected_inputs]
else:
baseline = []
candidate_outputs = list(range(len(node.outputs)))
# node inputs that are Constant, already destroyed,
# fgraph protected inputs and fgraph outputs can't be used as inplace
# target.
# Remove here as faster.
candidate_inputs = [i for i in xrange(len(node.inputs))
if not isinstance(node.inputs[i], Constant) and
not fgraph.destroyers(node.inputs[i]) and
node.inputs[i] not in protected_inputs]
verbose = False
raised_warning = not verbose
for candidate_output in candidate_outputs:
# If the output of the node can be established as an update
# output of the fgraph, visit the candidate_inputs in an order
# that will improve the chances of making the node operate
# inplace on the input it's meant to update
candidate_out_var = node.outputs[candidate_output]
sorted_candidate_inputs = candidate_inputs
if candidate_out_var in update_outs:
# The candidate output is an update. Sort the
# variables in candidate_inputs in the following order:
# - Vars corresponding to the actual updated input
# (best case scenario is for the node that produces
# an update to operate inplace on the variable to
# update)
# - Vars computed inplace on the updates input (second
# best scenario is for the node to work inplace on
# a variable obtained by a chain of inplace on the
# variable to update. In some cases, this will be
# equivalent to operating inplace on the variable to
# update)
# - Remaining variables
updated_inputs = []
for i, f_out in enumerate(fgraph.outputs):
if (f_out is candidate_out_var and i in fgraph.update_mapping):
updated_inp_idx = fgraph.update_mapping[i]
updated_inputs.append(fgraph.inputs[updated_inp_idx])
updated_vars = []
vars_from_inplace = []
other_vars = []
for inp_idx in candidate_inputs:
inp = node.inputs[inp_idx]
if inp in updated_inputs:
# the candidate input is the actual updated input
updated_vars.append(inp_idx)
elif (hasattr(fgraph, 'destroy_handler') and
inp.owner and
any([fgraph.destroy_handler.root_destroyer.get(up_inp, None) is inp.owner
for up_inp in updated_inputs])):
# the candidate input is a variable computed
# inplace on the updated input via a sequence of
# one or more inplace operations
vars_from_inplace.append(inp_idx)
else:
other_vars.append(inp_idx)
sorted_candidate_inputs = (updated_vars +
vars_from_inplace + other_vars)
for candidate_input in sorted_candidate_inputs:
# remove inputs that don't have the same dtype as the output
if node.inputs[candidate_input].type != node.outputs[
candidate_output].type:
continue
inplace_pattern = dict(baseline)
inplace_pattern[candidate_output] = candidate_input
try:
if hasattr(op.scalar_op, "make_new_inplace"):
new_scal = op.scalar_op.make_new_inplace(
scalar.transfer_type(
*[inplace_pattern.get(i, o.dtype)
for i, o in enumerate(node.outputs)]))
else:
new_scal = op.scalar_op.__class__(
scalar.transfer_type(
*[inplace_pattern.get(i, None)
for i in xrange(len(node.outputs))]))
new_outputs = self.op(new_scal, inplace_pattern)(
*node.inputs, **dict(return_list=True))
new_node = new_outputs[0].owner
for r, new_r in zip(node.outputs, new_outputs):
prof['nb_call_replace'] += 1
fgraph.replace(r, new_r,
reason="inplace_elemwise_optimizer")
nb_change_no_validate += 1
prof['ndim'][candidate_out_var.ndim] += 1
if nb_change_no_validate >= check_each_change:
prof['nb_call_validate'] += 1
fgraph.validate()
chk = fgraph.checkpoint()
nb_change_no_validate = 0
except (ValueError, InconsistencyError) as e:
prof['nb_inconsistent'] += 1
if check_each_change != 1 and not raised_warning:
print(("Some inplace optimization was not "
"performed due to unexpected error:"),
file=sys.stderr)
print(e, file=sys.stderr)
raised_warning = True
fgraph.revert(chk)
continue
candidate_inputs.remove(candidate_input)
node = new_node
baseline = inplace_pattern
break
if nb_change_no_validate > 0:
try:
fgraph.validate()
except Exception:
if not raised_warning:
print(("Some inplace optimization was not "
"performed due to unexpected error"),
file=sys.stderr)
fgraph.revert(chk)
return prof
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
print("%s%s (%s)" % (
(' ' * level), self.__class__.__name__, self.op), file=stream)
return inplace_elemwise_optimizer
inplace_elemwise_optimizer = InplaceElemwiseOptimizer(T.Elemwise)
compile.optdb.register('inplace_elemwise_opt', inplace_elemwise_optimizer, 75,
'inplace_opt', # for historic reason
'inplace_elemwise_optimizer',
'fast_run', 'inplace')
def register_useless(lopt, *tags, **kwargs):
if type(lopt) == str:
def register(inner_lopt):
return register_useless(inner_lopt, lopt, *tags, **kwargs)
return register
else:
name = kwargs.pop('name', None) or lopt.__name__
compile.mode.local_useless.register(name, lopt, 'last', 'fast_run',
*tags, **kwargs)
return lopt
def register_canonicalize(lopt, *tags, **kwargs):
if type(lopt) == str:
def register(inner_lopt):
return register_canonicalize(inner_lopt, lopt, *tags, **kwargs)
return register
else:
name = kwargs.pop('name', None) or lopt.__name__
compile.optdb['canonicalize'].register(name, lopt, 'fast_run',
*tags, **kwargs)
return lopt
def register_stabilize(lopt, *tags, **kwargs):
if type(lopt) == str:
def register(inner_lopt):
return register_stabilize(inner_lopt, lopt, *tags, **kwargs)
return register
else:
name = kwargs.pop('name', None) or lopt.__name__
compile.optdb['stabilize'].register(name, lopt, 'fast_run',
*tags, **kwargs)
return lopt
def register_specialize(lopt, *tags, **kwargs):
if type(lopt) == str:
def register(inner_lopt):
return register_specialize(inner_lopt, lopt, *tags, **kwargs)
return register
else:
name = kwargs.pop('name', None) or lopt.__name__
compile.optdb['specialize'].register(name, lopt, 'fast_run',
*tags, **kwargs)
return lopt
def register_uncanonicalize(lopt, *tags, **kwargs):
if type(lopt) == str:
def register(inner_lopt):
return register_uncanonicalize(inner_lopt, lopt, *tags, **kwargs)
return register
else:
name = (kwargs and kwargs.pop('name', None)) or lopt.__name__
compile.optdb['uncanonicalize'].register(name, lopt, 'fast_run', *tags,
**kwargs)
return lopt
def register_specialize_device(lopt, *tags, **kwargs):
if type(lopt) == str:
def register(inner_lopt):
return register_specialize_device(inner_lopt, lopt, *tags, **kwargs)
return register
else:
name = (kwargs and kwargs.pop('name', None)) or lopt.__name__
compile.optdb['specialize_device'].register(name, lopt, 'fast_run', *tags,
**kwargs)
return lopt
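# Illustrative sketch (not from the original module): these helpers are
# typically stacked as decorators on a local optimizer, mirroring the
# definitions below; the optimizer name here is hypothetical:
#
#     @register_canonicalize
#     @register_specialize
#     @gof.local_optimizer([T.Dot])
#     def local_my_opt(node):
#         ...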
#####################
# Dot optimizations #
#####################
@register_canonicalize
@register_stabilize
@gof.local_optimizer([T.Dot])
def local_0_dot_x(node):
if not isinstance(node.op, T.Dot):
return False
x = node.inputs[0]
y = node.inputs[1]
replace = False
try:
if get_scalar_constant_value(x, only_process_constants=True) == 0:
replace = True
except NotScalarConstantError:
pass
try:
if get_scalar_constant_value(y, only_process_constants=True) == 0:
replace = True
except NotScalarConstantError:
pass
if replace:
constant_zero = T.constant(0, dtype=node.outputs[0].type.dtype)
if x.ndim == 2 and y.ndim == 2:
constant_zero = assert_(constant_zero,
T.eq(x.shape[1], y.shape[0]))
return [T.alloc(constant_zero, x.shape[0], y.shape[1])]
elif x.ndim == 1 and y.ndim == 2:
constant_zero = assert_(constant_zero,
T.eq(x.shape[0], y.shape[0]))
return [T.alloc(constant_zero, y.shape[1])]
elif x.ndim == 2 and y.ndim == 1:
constant_zero = assert_(constant_zero,
T.eq(x.shape[1], y.shape[0]))
return [T.alloc(constant_zero, x.shape[0])]
elif x.ndim == 1 and y.ndim == 1:
constant_zero = assert_(constant_zero,
T.eq(x.shape[0], y.shape[0]))
return [constant_zero]
else:
_logger.warning("Optimization Warning: "
"Optimization theano/opt.py:local_0_dot_x Found "
"that it could apply, but was not implemented "
"for dot product with these input types:\n"
"(%s, %s)",
x.type, y.type)
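# Illustrative sketch (not from the original module): when either operand of
# dot(x, y) is known to be the constant 0, the product is replaced by an
# appropriately-shaped alloc of zeros, e.g. for 2-d x and y:
#   dot(x, y) -> alloc(0, x.shape[0], y.shape[1])
# guarded by an assert that the inner dimensions match.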
######################
# DimShuffle lifters #
######################
def apply_local_dimshuffle_lift(var):
# return var
# lift recursively
if not var.owner:
return var
new = local_dimshuffle_lift.transform(var.owner)
if new:
return new[0]
return var
# Checks for two types of useless dimshuffles:
# 1 - dimshuffle all dimensions in order.
# 2 - dimshuffle a broadcastable dimension.
def is_dimshuffle_useless(new_order, input):
is_useless = True
if len(new_order) == input.type.ndim:
all_broadcastable_dims = [i for (i, is_broadcastable)
in enumerate(input.type.broadcastable)
if is_broadcastable] + ['x']
for i in range(input.type.ndim):
if (new_order[i] == i or
(i in all_broadcastable_dims and
new_order[i] in all_broadcastable_dims)):
is_useless = True
else:
is_useless = False
break
else:
is_useless = False
return is_useless
@gof.local_optimizer([DimShuffle])
def local_dimshuffle_lift(node):
"""
"Lifts" DimShuffle through Elemwise operations and merges
consecutive DimShuffles. Basically, applies the following
transformations on the whole graph:
DimShuffle(Elemwise(x, y)) => Elemwise(DimShuffle(x), DimShuffle(y))
DimShuffle(DimShuffle(x)) => DimShuffle(x)
DimShuffle{0,1,...}(x) => x (when the dimshuffle do nothing)
After this transform, clusters of Elemwise operations are
void of DimShuffle operations.
"""
op = node.op
if not isinstance(op, DimShuffle):
return False
input = node.inputs[0]
inode = input.owner
new_order = op.new_order
if inode and isinstance(inode.op, Elemwise) and (len(input.clients) == 1):
# Don't use make_node to have tag.test_value set.
new_inputs = []
for inp in inode.inputs:
new_inp = op.__class__(inp.type.broadcastable,
op.new_order)(inp)
new_inputs.append(apply_local_dimshuffle_lift(new_inp))
copy_stack_trace(node.outputs[0], new_inputs)
ret = inode.op(*new_inputs, **dict(return_list=True))
return ret
if inode and isinstance(inode.op, DimShuffle):
new_order = [x == 'x' and 'x' or inode.op.new_order[x] for x in
new_order]
input = inode.inputs[0]
if is_dimshuffle_useless(new_order, input):
return [input]
elif inode and isinstance(inode.op, DimShuffle):
ret = op.__class__(input.type.broadcastable, new_order)(input)
ret = apply_local_dimshuffle_lift(ret)
copy_stack_trace(node.outputs[0], ret)
return [ret]
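# Illustrative sketch (not from the original module): with hypothetical
# tensors x and y,
#   (x * y).dimshuffle('x', 0, 1)
# is lifted to
#   x.dimshuffle('x', 0, 1) * y.dimshuffle('x', 0, 1)
# and an identity dimshuffle such as z.dimshuffle(0, 1) is removed entirely.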
@register_canonicalize
@gof.local_optimizer([Reshape])
def local_useless_dimshuffle_in_reshape(node):
"""
Removes useless DimShuffle operation inside Reshape:
reshape(vector.dimshuffle('x', 0), shp) => reshape(vector, shp)
reshape(matrix.dimshuffle('x', 0, 'x', 1), shp) => reshape(matrix, shp)
reshape(row.dimshuffle(1, 'x'), shp) => reshape(row, shp)
reshape(col.dimshuffle(0), shp) => reshape(col, shp)
"""
op = node.op
if not isinstance(op, Reshape):
return False
if not (node.inputs[0].owner is not None and
isinstance(node.inputs[0].owner.op, DimShuffle)):
return False
new_order = node.inputs[0].owner.op.new_order
input = node.inputs[0].owner.inputs[0]
broadcastables = node.inputs[0].broadcastable
new_order_of_nonbroadcast = []
for i, bd in zip(new_order, broadcastables):
if not bd:
new_order_of_nonbroadcast.append(i)
no_change_in_order = all(
new_order_of_nonbroadcast[i] <= new_order_of_nonbroadcast[i + 1]
for i in xrange(len(new_order_of_nonbroadcast) - 1))
if no_change_in_order:
shape = node.inputs[1]
ret = op.__class__(node.outputs[0].ndim)(input, shape)
copy_stack_trace(node.outputs[0], ret)
return [ret]
@register_canonicalize
@gof.local_optimizer([DimShuffle])
def local_lift_transpose_through_dot(node):
"""
dot(x,y).T -> dot(y.T, x.T)
These optimizations "lift" (propagate towards the inputs) DimShuffle
through dot product. It allows to put the graph in a more standard shape,
and to later merge consecutive DimShuffles.
The transformation should be applied whether or not the transpose is
inplace. The newly-introduced transpositions are not inplace; this will
be taken care of in a later optimization phase.
"""
if not (isinstance(node.op, T.DimShuffle) and node.op.new_order == (1, 0)):
return False
if not (node.inputs[0].owner and
isinstance(node.inputs[0].owner.op, T.Dot)):
return False
x, y = node.inputs[0].owner.inputs
if x.ndim == y.ndim == 2:
# Output is dot product of transposed inputs in reverse order
ret = [T.dot(y.T, x.T)]
# Copy over stack trace to output from result of dot-product
copy_stack_trace(node.inputs[0], ret)
return ret
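# Illustrative sketch (not from the original module): for 2-d matrices x and
# y, theano.dot(x, y).T is rewritten to theano.dot(y.T, x.T), which lets the
# transposes be merged with other DimShuffles closer to the inputs.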
register_canonicalize(local_dimshuffle_lift)
register_specialize(local_dimshuffle_lift)
######################
# Casting operations #
######################
@register_canonicalize
@register_specialize
@gof.local_optimizer([T.TensorFromScalar])
def local_tensor_scalar_tensor(node):
'''tensor_from_scalar(scalar_from_tensor(x)) -> x'''
if isinstance(node.op, T.TensorFromScalar):
s = node.inputs[0]
if s.owner and isinstance(s.owner.op, T.ScalarFromTensor):
t = s.owner.inputs[0]
# We don't need to copy over any stack traces here
return [t]
@register_canonicalize
@register_specialize
@gof.local_optimizer([T.ScalarFromTensor])
def local_scalar_tensor_scalar(node):
'''scalar_from_tensor(tensor_from_scalar(x)) -> x'''
if isinstance(node.op, T.ScalarFromTensor):
t = node.inputs[0]
if t.owner and isinstance(t.owner.op, T.TensorFromScalar):
s = t.owner.inputs[0]
# We don't need to copy over any stack traces here
return [s]
#####################################
# ShapeFeature, Shape optimizations
#####################################
class MakeVector(T.Op):
"""Concatenate a number of scalars together into a vector.
This is a simple version of stack() that introduces far less cruft
into the graph. Should work with 0 inputs. The constant_folding
optimization will remove it.
"""
__props__ = ("dtype",)
def __init__(self, dtype='int64'):
self.dtype = dtype
def make_node(self, *inputs):
inputs = list(map(T.as_tensor_variable, inputs))
if (not all(a.type == inputs[0].type for a in inputs) or
(len(inputs) > 0 and inputs[0].dtype != self.dtype)):
dtype = theano.scalar.upcast(self.dtype, *[i.dtype for i in inputs])
# upcast the input to the determined dtype,
# but don't downcast anything
assert dtype == self.dtype, (
"The upcast of the inputs to MakeVector should match the "
"dtype given in __init__.")
if not all(self.dtype == T.cast(i, dtype=dtype).dtype
for i in inputs):
raise TypeError("MakeVector.make_node expected inputs"
" upcastable to %s. got %s" %
(self.dtype, str([i.dtype for i in inputs])))
inputs = [T.cast(i, dtype=dtype) for i in inputs]
assert all(self.dtype == a.dtype for a in inputs)
assert all(a.ndim == 0 for a in inputs)
if inputs:
dtype = inputs[0].type.dtype
else:
dtype = self.dtype
# bcastable = (len(inputs) == 1)
bcastable = False
otype = T.TensorType(broadcastable=(bcastable,), dtype=dtype)
return T.Apply(self, inputs, [otype()])
def perform(self, node, inputs, out_):
out, = out_
# not calling theano._asarray as optimization
if (out[0] is None) or (out[0].size != len(inputs)):
out[0] = theano._asarray(inputs, dtype=node.outputs[0].dtype)
else:
# assume that out has correct dtype. there is no cheap way to check
out[0][...] = inputs
def c_code_cache_version(self):
return (2,)
def c_code(self, node, name, inp, out_, sub):
out, = out_
# Shouldn't use PyArray_TYPE(inp[0]) for the dtype
# when len(inp) == 0 (we need to support this case).
# So there will be (1 * nb_dtype) + (nb len(inp) - 1)
# different C code variants, generated with the following algorithm.
out_shape = len(inp)
out_num = numpy.dtype(node.outputs[0].dtype).num
# don't use dtype_%(out)s as when check_input=False, it isn't defined.
out_dtype = node.outputs[0].type.dtype_specs()[1]
if len(inp) > 0:
assert self.dtype == node.inputs[0].dtype
out_num = 'PyArray_TYPE(%s)' % inp[0]
ret = """
npy_intp dims[1];
dims[0] = %(out_shape)s;
if(!%(out)s || PyArray_DIMS(%(out)s)[0] != %(out_shape)s){
Py_XDECREF(%(out)s);
%(out)s = (PyArrayObject*)PyArray_EMPTY(1, dims, %(out_num)s, 0);
}
""" % locals()
for idx, i in enumerate(inp):
ret += """
*((%(out_dtype)s *)PyArray_GETPTR1(%(out)s, %(idx)s)) = *((%(out_dtype)s *) PyArray_DATA(%(i)s));
""" % locals()
return ret
def infer_shape(self, node, ishapes):
return [(len(ishapes),)]
def grad(self, inputs, output_gradients):
# If the output is of an integer dtype, no gradient shall pass
if self.dtype in theano.tensor.discrete_dtypes:
return [ipt.zeros_like().astype(theano.config.floatX)
for ipt in inputs]
grads = []
for i, inp in enumerate(inputs):
grads.append(output_gradients[0][i])
return grads
def R_op(self, inputs, eval_points):
if None in eval_points:
return [None]
return self.make_node(*eval_points).outputs
make_vector = MakeVector()
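# Illustrative sketch (not from the original module): make_vector stitches
# scalar shape components back into a symbolic vector, e.g.
#   make_vector(x.shape[0], x.shape[1])
# builds an int64 vector of length 2; ShapeFeature.make_vector_shape uses
# exactly this to materialise a stored shape tuple.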
class MakeVectorPrinter:
def process(self, r, pstate):
if r.owner is None:
raise TypeError("Can only print make_vector.")
elif isinstance(r.owner.op, MakeVector):
old_precedence = getattr(pstate, 'precedence', None)
try:
pstate.precedence = 1000
s = [pstate.pprinter.process(input)
for input in r.owner.inputs]
finally:
pstate.precedence = old_precedence
return "[%s]" % ", ".join(s)
else:
raise TypeError("Can only print make_vector.")
T.pprint.assign(MakeVector, MakeVectorPrinter())
class ShapeFeature(object):
"""Graph optimizer for removing all calls to shape().
This optimizer replaces all Shapes and Subtensors of Shapes with
Shape_i and MakeVector Ops.
This optimizer has several goals:
1. to 'lift' Shapes to as close to the inputs as possible.
2. to infer the shape of every node in the graph in terms of the
input shapes.
3. remove all fills (T.second, T.fill) from the graph
Lifting shapes as close to the inputs as possible is important for
canonicalization because it is very bad form to have to compute
something just to know how big it will be. Firstly, it is a waste
of time to compute such outputs. But it is important to get rid
of these outputs as early as possible in the compilation process
because the extra computations make it appear as if many internal
graph nodes have multiple clients. Many optimizations refuse to
work on nodes with multiple clients.
Lifting is done by using an `<Op>.infer_shape` function if one is
present, or else using a conservative default. An Op that
supports shape-lifting should define a infer_shape(self, node,
input_shapes) function. The argument input_shapes is a tuple of
tuples... there is an interior tuple for each input to the node.
The tuple has as many elements as dimensions. The element in
position i of tuple j represents the i'th shape component of the
j'th input. The function should return a tuple of tuples. One
output tuple for each node.output. Again, the i'th element of the
j'th output tuple represents the output[j].shape[i] of the
function. If an output is not a TensorType, then None should be
returned instead of a tuple for that output.
For example the infer_shape for a matrix-matrix product would accept
input_shapes=((x0,x1), (y0,y1)) and return ((x0, y1),).
Inferring the shape of internal nodes in the graph is important
for doing size-driven optimizations. If we know how big various
intermediate results will be, we can estimate the cost of many Ops
accurately, and generate c-code that is specific [e.g. unrolled]
to particular sizes.
In cases where you cannot figure out the shape, raise a ShapeError.
Notes
-----
Right now there is only the ConvOp that could really take
advantage of this shape inference, but it is worth it even
just for the ConvOp. All that's necessary to do shape
inference is 1) to mark shared inputs as having a particular
shape, either via a .tag or some similar hacking; and 2) to
add an optional In() argument to promise that inputs will
have a certain shape (or even to have certain shapes in
certain dimensions). We can't automatically infer the shape of
shared variables as they can change shape during
execution by default. (NOT IMPLEMENTED YET, BUT IS IN TRAC)
**Using Shape information in Optimizations**
To use this shape information in OPTIMIZATIONS, use the
``shape_of`` dictionary.
For example:
.. code-block:: python
try:
shape_of = node.fgraph.shape_feature.shape_of
except AttributeError:
# This can happen when the mode doesn't include the ShapeFeature.
return
shape_of_output_zero = shape_of[node.output[0]]
The ``shape_of_output_zero`` symbol will contain a tuple, whose
elements are either integers or symbolic integers.
TODO: check to see if the symbols are necessarily
non-constant... or are integer literals sometimes Theano
constants?? That would be confusing.
"""
def get_node_infer_shape(self, node):
try:
shape_infer = node.op.infer_shape
except AttributeError:
shape_infer = self.default_infer_shape
try:
o_shapes = shape_infer(node,
[self.shape_of[r] for r in node.inputs])
except ShapeError:
o_shapes = self.default_infer_shape(node, [self.shape_of[r] for
r in node.inputs])
except NotImplementedError as e:
raise NotImplementedError(
'Code called by infer_shape failed raising a '
'NotImplementedError. Raising NotImplementedError to '
'indicate that a shape cannot be computed is no longer '
'supported, and one should now use tensor.ShapeError '
'instead. The original exception message is: %s' % e)
except Exception as e:
msg = ('Failed to infer_shape from Op %s.\nInput shapes: '
'%s\nException encountered during infer_shape: '
'%s\nException message: %s\nTraceback: %s') % (
node.op, [self.shape_of[r] for r in node.inputs],
type(e), str(e), traceback.format_exc())
if config.on_shape_error == "raise":
raise Exception(msg)
else:
_logger.warning(msg)
o_shapes = self.default_infer_shape(
node, [self.shape_of[r] for r in node.inputs])
return o_shapes
def get_shape(self, var, idx):
""" Optimization can call this to get the current shape_i
It is better to call this than to use shape_of[var][idx] directly,
as this method should update shape_of if needed.
TODO: Up to now, we don't update it in all cases. Update in all cases.
"""
r = self.shape_of[var][idx]
if (r.owner and
isinstance(r.owner.op, Shape_i) and
r.owner.inputs[0] not in var.fgraph.variables):
assert var.owner
node = var.owner
# recur on inputs
for i in node.inputs:
if getattr(i, 'ndim', None) > 0:
self.get_shape(i, 0)
o_shapes = self.get_node_infer_shape(node)
assert len(o_shapes) == len(node.outputs)
# Only change the variables and dimensions that would introduce
# extra computation
for new_shps, out in zip(o_shapes, node.outputs):
if not hasattr(out, 'ndim'):
continue
merged_shps = list(self.shape_of[out])
changed = False
for i in range(out.ndim):
n_r = merged_shps[i]
if (n_r.owner and
isinstance(n_r.owner.op, Shape_i) and
n_r.owner.inputs[0] not in var.fgraph.variables):
changed = True
merged_shps[i] = new_shps[i]
if changed:
self.set_shape(out, merged_shps, override=True)
r = self.shape_of[var][idx]
return r
def shape_ir(self, i, r):
"""Return symbolic r.shape[i] for tensor variable r, int i."""
if hasattr(r.type, "broadcastable") and r.type.broadcastable[i]:
return self.lscalar_one
else:
# Do not call make_node for test_value
s = Shape_i(i)(r)
try:
s = get_scalar_constant_value(s)
except NotScalarConstantError:
pass
return s
def shape_tuple(self, r):
"""Return a tuple of symbolic shape vars for tensor variable r."""
if not hasattr(r, 'ndim'):
# This happen for NoneConst.
return None
return tuple([self.shape_ir(i, r) for i in xrange(r.ndim)])
def default_infer_shape(self, node, i_shapes):
"""Return a list of shape tuple or None for the outputs of node.
This function is used for Ops that don't implement infer_shape.
Ops that do implement infer_shape should use the i_shapes parameter,
but this default implementation ignores it.
"""
rval = []
for r in node.outputs:
try:
rval.append(self.shape_tuple(r))
except AttributeError:
rval.append(None)
return rval
def unpack(self, s_i, var):
"""Return a symbolic integer scalar for the shape element s_i.
The s_i argument was produced by the infer_shape() of an Op subclass.
var: the variable that correspond to s_i. This is just for
error reporting.
"""
# unpack the s_i that the Op returned
assert s_i is not None
if s_i == 1:
# don't make the optimizer merge a zillion ones together
# by always returning the same object to represent 1
return self.lscalar_one
if type(s_i) is float and int(s_i) == s_i:
s_i = int(s_i)
if (type(s_i) in integer_types or
isinstance(s_i, numpy.integer) or
(isinstance(s_i, numpy.ndarray) and s_i.ndim == 0)):
# this shape is a constant
if s_i < 0:
msg = "There is a negative shape in the graph!"
msg += gof.utils.get_variable_trace_string(var)
raise ValueError(msg)
return T.constant(s_i, dtype='int64')
if type(s_i) in (tuple, list):
# this dimension is the same as many of the inputs
# which tells us that if one of the inputs is known,
# the others all become known.
# TODO: should be implemented in Elemwise, and Dot
#
# worst case, we loop over shape_of and replace things
raise NotImplementedError(s_i)
# s_i is x.shape[i] for some x, we change it to shape_of[x][i]
if (s_i.owner and
isinstance(s_i.owner.op, Subtensor) and
s_i.owner.inputs[0].owner and
isinstance(s_i.owner.inputs[0].owner.op, T.Shape)):
assert s_i.ndim == 0
assert len(s_i.owner.op.idx_list) == 1
# The current Subtensor always put constant index in the graph.
# This was not True in the past. So call the Subtensor function
# that will return the right index.
idx = get_idx_list(s_i.owner.inputs, s_i.owner.op.idx_list)
assert len(idx) == 1
idx = idx[0]
try:
i = get_scalar_constant_value(idx)
except NotScalarConstantError:
pass
else:
# Executed only if no exception was raised
x = s_i.owner.inputs[0].owner.inputs[0]
# x should already have been imported, and should be in shape_of.
s_i = self.shape_of[x][i]
if s_i.type.dtype in theano.tensor.integer_dtypes:
if getattr(s_i.type, 'ndim', 0):
raise TypeError('Shape element must be scalar', s_i)
return s_i
else:
raise TypeError('Unsupported shape element',
s_i, type(s_i), getattr(s_i, 'type', None))
def set_shape(self, r, s, override=False):
"""Assign the shape `s` to previously un-shaped variable `r`.
Parameters
----------
r : a variable
s : None or a tuple of symbolic integers
override : If False, it means r is a new object in the fgraph.
If True, it means r is already in the fgraph and we want to
override its shape.
"""
if not override:
assert r not in self.shape_of, 'r already in shape_of'
if s is None:
self.shape_of[r] = s
else:
if not isinstance(s, (tuple, list)):
raise TypeError('shapes must be tuple/list', (r, s))
if r.ndim != len(s):
sio = StringIO()
theano.printing.debugprint(r, file=sio, print_type=True)
raise AssertionError(
"Something inferred a shape with %d dimensions "
"for a variable with %d dimensions"
" for the variable:\n%s" % (
len(s), r.ndim, sio.getvalue()))
shape_vars = []
for i in xrange(r.ndim):
if (hasattr(r.type, 'broadcastable') and
r.type.broadcastable[i]):
shape_vars.append(self.lscalar_one)
else:
shape_vars.append(self.unpack(s[i], r))
assert all([not hasattr(r.type, "broadcastable") or
not r.type.broadcastable[i] or
# The two following comparisons are a speed optimization,
# but we never timed this speed optimization!
self.lscalar_one.equals(shape_vars[i]) or
self.lscalar_one.equals(
T.extract_constant(shape_vars[i]))
for i in xrange(r.ndim)])
self.shape_of[r] = tuple(shape_vars)
for sv in shape_vars:
self.shape_of_reverse_index.setdefault(sv, set()).add(r)
def update_shape(self, r, other_r):
"""Replace shape of r by shape of other_r.
If, on some dimensions, the shape of other_r is not informative,
keep the shape of r on those dimensions.
"""
# other_r should already have a shape
assert other_r in self.shape_of, ('other_r not in shape_of', other_r)
other_shape = self.shape_of[other_r]
# If other_shape has no information, call is pointless.
if other_shape is None:
return
if r in self.shape_of:
r_shape = self.shape_of[r]
else:
# If no info is known on r's shape, use other_shape
self.set_shape(r, other_shape)
return
if (other_r.owner and r.owner and
other_r.owner.inputs == r.owner.inputs and
other_r.owner.op == r.owner.op):
# We are doing a merge. So the 2 shapes graph will be the
# same. This is only a speed optimization to call
# ancestors() less frequently.
return
# Merge other_shape with r_shape, giving the priority to other_shape
merged_shape = []
for i, ps in enumerate(other_shape):
if r_shape is None and other_shape:
merged_shape.append(other_shape[i])
elif (ps.owner and
isinstance(getattr(ps.owner, 'op', None), Shape_i) and
ps.owner.op.i == i and
ps.owner.inputs[0] in (r, other_r)):
# If other_shape[i] is uninformative, use r_shape[i].
# For now, we consider 2 cases of uninformative other_shape[i]:
# - Shape_i(i)(other_r);
# - Shape_i(i)(r).
merged_shape.append(r_shape[i])
elif isinstance(r_shape[i], (Constant, integer_types)):
# We do this to call less often ancestors and make
# sure we have the simplest shape possible.
merged_shape.append(r_shape[i])
elif isinstance(other_shape[i], (Constant, integer_types)):
# We do this to call less often ancestors and make
# sure we have the simplest shape possible.
merged_shape.append(other_shape[i])
elif other_shape[i] == r_shape[i]:
# This means the shapes are equivalent.
# We do not want to do the ancestor check in those cases
merged_shape.append(r_shape[i])
elif r_shape[i] in theano.gof.graph.ancestors([other_shape[i]]):
# Another case where we want to use r_shape[i] is when
# other_shape[i] actually depends on r_shape[i]. In that case,
# we do not want to substitute an expression with another that
# is strictly more complex. Such a substitution could also lead
# to cycles: if (in the future) r_shape[i] gets replaced by an
# expression of other_shape[i], other_shape[i] may end up
# depending on itself.
merged_shape.append(r_shape[i])
else:
merged_shape.append(other_shape[i])
assert all([(not hasattr(r.type, "broadcastable") or
not r.type.broadcastable[i] and
not other_r.type.broadcastable[i]) or
# The two following comparisons are a speed optimization,
# but we never timed this speed optimization!
self.lscalar_one.equals(merged_shape[i]) or
self.lscalar_one.equals(
T.extract_constant(merged_shape[i], only_process_constants=True))
for i in xrange(r.ndim)])
self.shape_of[r] = tuple(merged_shape)
for sv in self.shape_of[r]:
self.shape_of_reverse_index.setdefault(sv, set()).add(r)
def set_shape_i(self, r, i, s_i):
'''Replace element i of shape_of[r] by s_i'''
assert r in self.shape_of
prev_shape = self.shape_of[r]
# prev_shape is a tuple, so we cannot change it inplace,
# so we build another one.
new_shape = []
for j, s_j in enumerate(prev_shape):
if j == i:
new_shape.append(self.unpack(s_i, r))
else:
new_shape.append(s_j)
assert all([not hasattr(r.type, "broadcastable") or
not r.type.broadcastable[idx] or
# The two following comparisons are a speed optimization,
# but we never timed this speed optimization!
self.lscalar_one.equals(new_shape[idx]) or
self.lscalar_one.equals(T.extract_constant(new_shape[idx]))
for idx in xrange(r.ndim)])
self.shape_of[r] = tuple(new_shape)
for sv in self.shape_of[r]:
self.shape_of_reverse_index.setdefault(sv, set()).add(r)
def init_r(self, r):
'''Register r's shape in the shape_of dictionary.'''
if r not in self.shape_of:
try:
self.set_shape(r, self.shape_tuple(r))
except AttributeError: # XXX: where would this come from?
self.set_shape(r, None)
def make_vector_shape(self, r):
return make_vector(*self.shape_of[r])
#
# Feature interface
#
#
def on_attach(self, fgraph):
assert not hasattr(fgraph, 'shape_feature')
fgraph.shape_feature = self
# Must be local to the object as otherwise we reuse the same
# variable for multiple fgraph!
self.lscalar_one = T.constant(1, dtype='int64')
assert self.lscalar_one.type == T.lscalar
self.shape_of = {}
# Variable -> tuple(scalars) or None (All tensor vars map to tuple)
self.scheduled = {}
# Variable ->
self.shape_of_reverse_index = {}
# shape var -> graph v
for node in fgraph.toposort():
self.on_import(fgraph, node, reason='on_attach')
def on_detach(self, fgraph):
self.shape_of = {}
self.scheduled = {}
self.shape_of_reverse_index = {}
del fgraph.shape_feature
def on_import(self, fgraph, node, reason):
if node.outputs[0] in self.shape_of:
# this is a revert, not really an import
for r in node.outputs + node.inputs:
assert r in self.shape_of
return
for i, r in enumerate(node.inputs):
# make sure we have shapes for the inputs
self.init_r(r)
o_shapes = self.get_node_infer_shape(node)
# this is packed information
# an element of o_shapes is either None or a tuple
# elements of the tuple can be either strings, or ints
if len(o_shapes) != len(node.outputs):
raise Exception(
('The infer_shape method for the Op "%s" returned a list ' +
'with the wrong number of elements: len(o_shapes) = %d ' +
' != len(node.outputs) = %d') % (str(node.op),
len(o_shapes),
len(node.outputs)))
# Ensure shapes are in 'int64'. This is to make sure the assert
# found in the `local_useless_subtensor` optimization does not fail.
for sh_idx, sh in enumerate(o_shapes):
if sh is None:
continue
if not isinstance(sh, (list, tuple)):
raise ValueError("infer_shape of %s didn't return a list of"
" list. It returned '%s'" % (str(node), str(o_shapes)))
new_shape = []
for i, d in enumerate(sh):
# Note: we ignore any shape element that is not typed (i.e.,
# does not have a 'dtype' attribute). This means there may
# still remain int elements that are int32 on 32-bit platforms,
# but this works with `local_useless_subtensor`, so for now we
# keep it this way. See #266 for a better long-term fix.
if getattr(d, 'dtype', 'int64') != 'int64':
assert d.dtype in theano.tensor.discrete_dtypes, (node, d.dtype)
assert str(d.dtype) != 'uint64', node
new_shape += sh[len(new_shape):i + 1]
if isinstance(d, T.Constant):
casted_d = T.constant(d.data, dtype='int64')
else:
casted_d = theano.tensor.cast(d, 'int64')
new_shape[i] = casted_d
if new_shape:
# We replace the shape with wrong dtype by the one with
# 'int64'.
new_shape += sh[len(new_shape):]
o_shapes[sh_idx] = tuple(new_shape)
for r, s in izip(node.outputs, o_shapes):
self.set_shape(r, s)
def on_change_input(self, fgraph, node, i, r, new_r, reason):
if new_r not in self.shape_of:
# It can happen that the fgraph didn't call on_import for some
# new_r. This happens when new_r doesn't have an
# owner (i.e. it is a constant or an input of the graph).
# update_shape assumes that r and new_r are in shape_of.
self.init_r(new_r)
# This tells us that r and new_r must have the same shape if
# we didn't know that the shapes are related, now we do.
self.update_shape(new_r, r)
# change_input happens in two cases:
# 1) we are trying to get rid of r, or
# 2) we are putting things back after a failed transaction.
# In case 1, if r has a shape_i client, we will want to
# replace the shape_i of r with the shape of new_r. Say that
# r is *scheduled*.
# At that point, node is no longer a client of r, but of new_r
for (shpnode, idx) in (r.clients + [(node, i)]):
if isinstance(getattr(shpnode, 'op', None), Shape_i):
idx = shpnode.op.i
repl = self.shape_of[new_r][idx]
if repl.owner is shpnode:
# This means the replacement shape object is
# exactly the same as the current shape object, so
# there is no need for replacement. This happens, for example,
# with the InputToGpuOptimizer optimizer.
continue
if (repl.owner and
repl.owner.inputs[0] is shpnode.inputs[0] and
isinstance(repl.owner.op, Shape_i) and
repl.owner.op.i == shpnode.op.i):
# The replacement is a shape_i of the same
# input. So no need to do this equivalent
# replacement.
continue
if shpnode.outputs[0] in theano.gof.graph.ancestors([repl]):
raise InconsistencyError(
"This substitution would insert a cycle in the graph:"
"node: %s, i: %i, r: %s, new_r: %s"
% (node, i, r, new_r))
self.scheduled[shpnode] = new_r
# In case 2, if r is a variable that we've scheduled for shape update,
# then we should cancel it.
unscheduled = [k for k, v in self.scheduled.items() if v == r]
for k in unscheduled:
del self.scheduled[k]
# In either case, r could be in shape_of.values(), that is, r itself
# is the shape of something. In that case, we want to update
# the value in shape_of, to keep it up-to-date.
for v in self.shape_of_reverse_index.get(r, []):
# The reverse index is only approximate. It is not updated on
# deletion of variables, or on change_input so it might be the
# case that there are a few extra `v`'s in it that no longer have
# a shape of r or possibly have been deleted from shape_of
# entirely. The important thing is that it lets us recall
# all variables with r in their shape.
for ii, svi in enumerate(self.shape_of.get(v, [])):
if svi == r:
self.set_shape_i(v, ii, new_r)
self.shape_of_reverse_index[r] = set()
def same_shape(self, x, y, dim_x=None, dim_y=None):
"""Return True if we are able to assert that x and y have the
same shape.
dim_x and dim_y are optional. If used, they should be an index
to compare only 1 dimension of x and y.
"""
sx = self.shape_of[x]
sy = self.shape_of[y]
if sx is None or sy is None:
return False
if dim_x is not None:
sx = [sx[dim_x]]
if dim_y is not None:
sy = [sy[dim_y]]
assert len(sx) == len(sy)
# We look on each dimensions we want to compare.
# If any of them can't be asserted to be equal, return False.
# Otherwise, we return True at the end.
for dx, dy in zip(sx, sy):
if dx is dy:
continue
# Need to try to find that they are the same shape. We
# need to compare the full graph. It could be slow. So I
# just implement for now the case of Shape_i.
if not dx.owner or not dy.owner:
return False
if (not isinstance(dx.owner.op, Shape_i) or
not isinstance(dy.owner.op, Shape_i)):
return False
opx = dx.owner.op
opy = dy.owner.op
if not (opx.i == opy.i):
return False
# FB: I'm not sure if this handles constants correctly.
if dx.owner.inputs[0] == dy.owner.inputs[0]:
continue
# To be sure to cover all case, call equal_computation.
# Can't use theano.gof.graph.is_same_graph(dx, dy)
# As it currently expect that dx and dy aren't in a FunctionGraph
from theano.scan_module.scan_utils import equal_computations
if not equal_computations([dx], [dy]):
return False
return True
class ShapeOptimizer(Optimizer):
"""Optimizer that serves to add ShapeFeature as an fgraph feature."""
def add_requirements(self, fgraph):
fgraph.attach_feature(ShapeFeature())
def apply(self, fgraph):
pass
class UnShapeOptimizer(Optimizer):
"""Optimizer remove ShapeFeature as an fgraph feature."""
def apply(self, fgraph):
for feature in fgraph._features:
if isinstance(feature, ShapeFeature):
fgraph.remove_feature(feature)
# Register it after merge1 optimization at 0. We don't want to track
# the shape of merged node.
theano.compile.mode.optdb.register('ShapeOpt', ShapeOptimizer(),
0.1, 'fast_run', 'fast_compile')
# Not enabled by default for now. Some crossentropy opts use the
# shape_feature. They are at step 2.01. uncanonicalize is at step
# 3. After that comes 48.5, which moves things to the GPU. So 10 seems reasonable.
theano.compile.mode.optdb.register('UnShapeOpt', UnShapeOptimizer(),
10)
def local_elemwise_alloc_op(ElemwiseOP, AllocOP, DimShuffleOP):
def local_elemwise_alloc(node):
"""
elemwise(alloc(x, shp), ..., y.TensorType(BROADCAST CONDITION))
-> elemwise(x, y.TensorType(BROADCAST CONDITION))
elemwise(dimshuffle(alloc(x, shp)),... ,y.TensorType(BROADCAST CONDITION))
-> elemwise(x.dimshuffle(...), y.TensorType(BROADCAST CONDITION))
BROADCAST CONDITION: the inputs that are not to be optimized must
have the same broadcast pattern as the output.
We can replace the alloc with a dimshuffle, as the elemwise
already has the shape info. The dimshuffle will be faster
to execute.
"""
if not isinstance(node.op, ElemwiseOP):
return False
if len(node.outputs) > 1:
# Ensure all outputs have the same broadcast pattern
# This is a supposition that I'm not sure is always true.
assert all([o.type.broadcastable ==
node.outputs[0].type.broadcastable for o in
node.outputs[1:]])
# The broadcast pattern of the output must match the broadcast
# pattern of at least one of the inputs.
if not any([i.type.broadcastable ==
node.outputs[0].type.broadcastable for i in node.inputs]):
return False
def dimshuffled_alloc(i):
return (isinstance(i.owner.op, DimShuffleOP) and
i.owner.inputs[0].owner and
isinstance(i.owner.inputs[0].owner.op, AllocOP))
# At least one input must have an owner that is either a AllocOP or a
# DimShuffleOP with an owner that is a AllocOP -- otherwise there is
# nothing to optimize.
if not any([i.owner and (isinstance(i.owner.op, AllocOP) or
dimshuffled_alloc(i)) for i in node.inputs]):
return False
# Search for input that we can use as a baseline for the dimensions.
assert_op_idx = -1
for idx, i in enumerate(node.inputs):
if i.type.broadcastable == node.outputs[0].type.broadcastable:
# Prefer an input that is not a AllocOP nor a DimShuffleOP of a
# AllocOP so that all allocs can be optimized.
if not (i.owner and (isinstance(i.owner.op, AllocOP) or
dimshuffled_alloc(i))):
assert_op_idx = idx
break
# It may be the case that only AllocOP and DimShuffleOP of AllocOP exist.
if assert_op_idx < 0:
# We want to optimize as many allocs as possible. When
# there is more than one, do all but one. l2 lists the
# inputs with an alloc or a dimshuffled alloc.
l2 = [i for i in node.inputs
if (i.owner and (isinstance(i.owner.op, AllocOP) or
dimshuffled_alloc(i)))]
# If only 1 alloc or dimshuffle alloc, it is the one we
# will use for the shape. So no alloc would be removed.
if len(l2) > 1:
# l contains inputs with alloc or dimshuffle alloc
# only. Its length will always be at least one, as we
# checked that before
l = [idx for idx, i in enumerate(node.inputs)
if i.broadcastable == node.outputs[0].broadcastable]
assert_op_idx = l[0] # The first one is as good as any to use.
else:
# Nothing would be optimized!
return False
assert_op = node.inputs[assert_op_idx]
cmp_op = assert_op
new_i = []
same_shape = node.fgraph.shape_feature.same_shape
for i in node.inputs:
# Remove alloc
if (i.owner and isinstance(i.owner.op, AllocOP) and
i.owner.inputs[0].type != i.owner.outputs[0].type):
# when i.owner.inputs[0].type == i.owner.outputs[0].type we
# will remove that alloc later
assert i.type.ndim == cmp_op.ndim
if theano.config.experimental.local_alloc_elemwise_assert:
get_shape = node.fgraph.shape_feature.get_shape
cond = []
for idx in xrange(i.type.ndim):
if (not i.type.broadcastable[idx] and
not same_shape(i, cmp_op, idx, idx)):
i_shp = get_shape(i, idx)
cmp_shp = get_shape(cmp_op, idx)
cond.append(T.eq(i_shp, cmp_shp))
if cond:
assert_op = assert_(assert_op, *cond)
new_i.append(i.owner.inputs[0])
# Remove Alloc in DimShuffle
elif i.owner and dimshuffled_alloc(i):
assert i.type.ndim == cmp_op.type.ndim
if theano.config.experimental.local_alloc_elemwise_assert:
assert_cond = [T.eq(i.shape[idx], cmp_op.shape[idx])
for idx in xrange(i.type.ndim)
if not i.type.broadcastable[idx] and
not same_shape(i, cmp_op, idx, idx)]
if assert_cond:
assert_op = assert_(assert_op, *assert_cond)
alloc_input = i.owner.inputs[0].owner.inputs[0]
if alloc_input.ndim != i.owner.inputs[0].ndim:
# The alloc can add dimension to the value
# We add a dimshuffle to add them.
# We let later optimization merge the multiple dimshuffle
nb_dim_to_add = i.owner.inputs[0].ndim - alloc_input.ndim
alloc_input = alloc_input.dimshuffle(
['x'] * nb_dim_to_add +
list(range(alloc_input.ndim)))
# We need to keep the dimshuffle. It could swap axes or
# add dimensions anywhere.
r_i = i.owner.op(alloc_input)
# Copy stack trace from i to new_i
copy_stack_trace(i, r_i)
new_i.append(r_i)
else:
new_i.append(i)
new_i[assert_op_idx] = assert_op
ret = node.op(*new_i, return_list=True)
# Copy over stack trace from previous outputs to new outputs.
copy_stack_trace(node.outputs, ret)
return ret
return local_elemwise_alloc
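# Illustrative sketch (not from the original module): with a hypothetical
# matrix y whose broadcast pattern already matches the output,
#   T.alloc(x, s0, s1) + y   ->   x + y
# (optionally guarded by shape asserts), since the elemwise output shape is
# already determined by y and the explicit alloc is redundant.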
# TODO, global optimizer that lift the assert to the beginning of the graph.
# TODO, optimize all inputs when possible -- currently when all inputs have
# an alloc all but one is optimized.
local_elemwise_alloc = register_specialize(
gof.local_optimizer([T.Elemwise])(
local_elemwise_alloc_op(T.Elemwise, T.Alloc, T.DimShuffle)),
'local_alloc_elemwise')
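# Illustrative sketch (not part of the original module): what the
# 'local_alloc_elemwise' rewrite registered above does in practice. The helper
# name is made up; it assumes a standard Theano install.
def _example_local_alloc_elemwise():
    import theano
    import theano.tensor as T
    x = T.scalar('x')
    y = T.matrix('y')
    # add(Alloc(x, 3, 4), y): the Alloc only broadcasts x up to y's shape.
    z = T.alloc(x, 3, 4) + y
    f = theano.function([x, y], z)
    # After optimization the Alloc should be gone; the compiled graph is
    # roughly add(x, Assert(y, ...)), with the Assert checking y's shape.
    theano.printing.debugprint(f)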
@gof.local_optimizer([T.Elemwise])
def local_fill_sink(node):
"""
f(fill(a, b), fill(c, d), e) -> fill(c, fill(a, f(b, d, e)))
    f needs to be an Elemwise op that isn't a fill.
"""
if (not hasattr(node, 'op') or
not isinstance(node.op, T.Elemwise) or
node.op == T.fill):
return False
models = []
inputs = []
for input in node.inputs:
if input.owner and input.owner.op == T.fill:
models.append(input.owner.inputs[0])
inputs.append(input.owner.inputs[1])
else:
inputs.append(input)
if not models:
return False
c = node.op(*inputs)
for model in models:
if model.type != c.type:
c = T.fill(model, c)
    # The newly created node c doesn't have 'clients' yet,
    # so this iteration takes place on node.outputs[0].
replacements = {node.outputs[0]: c}
for client, cl_idx in node.outputs[0].clients:
if (hasattr(client, 'op') and
isinstance(client.op, T.Elemwise) and
not client.op == T.fill):
client_inputs = client.inputs[:]
client_inputs[cl_idx] = c
new_client = client.op(*client_inputs)
# Add clients to new_client
new_client.owner.outputs[0].clients = client.outputs[0].clients
r = local_fill_sink.transform(new_client.owner)
if not r:
continue
replacements.update(r)
return replacements
register_canonicalize(local_fill_sink)
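# Illustrative sketch (not part of the original module) of the fill-sinking
# rewrite above; the helper name is made up and assumes a standard Theano
# install.
def _example_local_fill_sink():
    import theano
    import theano.tensor as T
    a = T.matrix('a')
    b = T.scalar('b')
    e = T.scalar('e')
    # mul(fill(a, b), e): the fill only carries a's shape/broadcast pattern.
    z = T.fill(a, b) * e
    f = theano.function([a, b, e], z)
    # After canonicalization the graph is roughly fill(a, mul(b, e)), and
    # later passes turn the remaining fill into an alloc.
    theano.printing.debugprint(f)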
@register_specialize
@register_stabilize
# @register_canonicalize # We make full pass after the canonizer phase.
@gof.local_optimizer([T.fill])
def local_fill_to_alloc(node):
"""fill(s,v) -> alloc(v, shape(s))
This is an important optimization because with the shape_to_shape_i
optimization, the dependency on 's' is often removed.
"""
if node.op == T.fill:
r, v = node.inputs
if v.type == node.outputs[0].type:
# this is a useless fill, erase it.
rval = [v]
elif v.type.broadcastable == node.outputs[0].type.broadcastable:
# this is a cast
rval = [T.cast(v, node.outputs[0].type.dtype)]
elif r.type.broadcastable == node.outputs[0].type.broadcastable:
# we are broadcasting v somehow, but not r
o = broadcast_like(v, r, node.fgraph, dtype=v.dtype)
copy_stack_trace(node.outputs[0], o)
rval = [o]
else:
# we are broadcasting both v and r,
# the output shape must be computed
#
# TODO: implement this case (including a test!)
#
# I think the strategy should be to extend the shorter
# shape vector with 1s (how?) and then take the
# elementwise max of the two. - how to flag an error of
# shape mismatch where broadcasting should be illegal?
return
# TODO: cut out un-necessary dimshuffles of v
assert rval[0].type == node.outputs[0].type, (
'rval', rval[0].type, 'orig', node.outputs[0].type, 'node',
node,) # theano.printing.debugprint(node.outputs[0], file='str'))
return rval
# Register this after stabilize at 1.5 to make sure stabilize doesn't
# get affected by a less canonicalized graph due to alloc.
compile.optdb.register('local_fill_to_alloc',
in2out(local_fill_to_alloc),
1.51, 'fast_run')
# Needed to clean some extra alloc added by local_fill_to_alloc
compile.optdb.register('local_elemwise_alloc',
in2out(local_elemwise_alloc),
1.52, 'fast_run')
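# Illustrative sketch (not part of the original module) of local_fill_to_alloc
# registered above; the helper name is made up and assumes a standard Theano
# install.
def _example_local_fill_to_alloc():
    import theano
    import theano.tensor as T
    s = T.matrix('s')
    v = T.scalar('v')
    z = T.fill(s, v)  # same shape as s, every element equal to v
    f = theano.function([s, v], z)
    # After this pass the graph computes roughly alloc(v, s.shape[0],
    # s.shape[1]): only the shape of s is needed, not its values.
    theano.printing.debugprint(f)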
@register_canonicalize("fast_compile")
@register_useless
@gof.local_optimizer([T.fill])
def local_useless_fill(node):
"""fill(s,v) -> v
This optimization is only needed in FAST_COMPILE to make the code
more readable. Normally, it is done by the local_fill_to_alloc
opt.
"""
if node.op == T.fill:
r, v = node.inputs
if v.type == node.outputs[0].type:
# this is a useless fill, erase it.
# also, we don't need to copy over any stack traces here
return [v]
@register_specialize
@register_stabilize
@register_canonicalize
@register_useless
@gof.local_optimizer([T.alloc])
def local_useless_alloc(node):
"""
    If the input type is the same as the output type (dtype and broadcastable
    pattern), the Alloc does not change the shape of the input, so it is just
    a copy of the input and is not needed.
"""
op = node.op
if not isinstance(op, Alloc):
return False
input = node.inputs[0]
output = node.outputs[0]
# Check if dtype and broadcast remain the same.
if input.type == output.type:
# We don't need to copy over any stack traces here
return [input]
@register_specialize
@register_stabilize
@register_canonicalize
@gof.local_optimizer([T.alloc])
def local_canonicalize_alloc(node):
"""If the input type is the same as the output type (dtype and broadcast)
there is no change in the shape of the input. So this is just a simple copy
of the input. This is not needed. (as local_useless_alloc)
Also, it will canonicalize alloc by creating Dimshuffle after the
alloc to introduce the dimensions of constant size 1.
See https://github.com/Theano/Theano/issues/4072 to know why this
is needed.
"""
op = node.op
if not isinstance(op, Alloc):
return False
input = node.inputs[0]
output = node.outputs[0]
# Check if dtype and broadcast remain the same.
if input.type == output.type:
# We don't need to copy over any stack traces here
return [input]
# Allow local_merge_alloc to do its work first
clients = getattr(output, 'clients', [])
for client, i in clients:
if client != "output" and isinstance(client.op, Alloc):
return
# Check if alloc adds a broadcastable dimension with shape 1.
output_shape = node.inputs[1:]
num_dims_with_size_1_added_to_left = 0
for i in range(len(output_shape) - input.ndim):
if extract_constant(output_shape[i], only_process_constants=True) == 1:
num_dims_with_size_1_added_to_left += 1
else:
break
new_output_shape = output_shape[num_dims_with_size_1_added_to_left:]
if num_dims_with_size_1_added_to_left > 0 and len(new_output_shape) >= input.ndim:
if output.broadcastable[num_dims_with_size_1_added_to_left:] == input.broadcastable:
inner = input
else:
inner = op(*([input] + new_output_shape))
dimshuffle_new_order = (['x'] * num_dims_with_size_1_added_to_left +
list(xrange(len(new_output_shape))))
return [DimShuffle(inner.type.broadcastable, dimshuffle_new_order)(inner)]
# Don't register by default.
@gof.local_optimizer([T.AllocEmpty])
def local_alloc_empty_to_zeros(node):
"""This convert AllocEmpty to Alloc of 0.
This help investigate NaN with NanGuardMode. Not registered by
default. To activate it, use the Theano flag
optimizer_including=alloc_empty_to_zeros. This also enable
the GPU version of this optimizations.
"""
if isinstance(node.op, T.AllocEmpty):
return [T.zeros(node.inputs, dtype=node.outputs[0].dtype)]
compile.optdb.register('local_alloc_empty_to_zeros',
in2out(local_alloc_empty_to_zeros),
# After move to gpu and merge2, before inplace.
49.3,
'alloc_empty_to_zeros',)
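# Illustrative sketch (not part of the original module) of enabling the
# optimization above. The helper name is made up; it assumes a standard
# Theano install and that T.AllocEmpty(dtype)(*shape) builds an uninitialized
# tensor, as used elsewhere in this module.
def _example_alloc_empty_to_zeros():
    import theano
    import theano.tensor as T
    # Enabled through its tag, e.g. with
    # THEANO_FLAGS="optimizer_including=alloc_empty_to_zeros",
    # or per function through the compilation mode:
    mode = theano.compile.mode.get_default_mode().including('alloc_empty_to_zeros')
    n = T.iscalar('n')
    out = T.AllocEmpty('float32')(n)  # uninitialized buffer of length n
    return theano.function([n], out, mode=mode)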
@register_specialize
@register_canonicalize
@gof.local_optimizer([T.Shape])
def local_shape_to_shape_i(node):
if node.op == T.shape:
# This optimization needs ShapeOpt and fgraph.shape_feature
if not hasattr(node.fgraph, 'shape_feature'):
return
shape_feature = node.fgraph.shape_feature
ret = shape_feature.make_vector_shape(node.inputs[0])
# We need to copy over stack trace from input to output
copy_stack_trace(node.outputs[0], ret)
return [ret]
# TODO: Not sure what type of node we are expecting here
@register_specialize
@register_canonicalize
@gof.local_optimizer(None)
def local_track_shape_i(node):
try:
shape_feature = node.fgraph.shape_feature
except AttributeError:
return
if node in shape_feature.scheduled:
        # Don't unschedule the node, as it could be reinserted in the
        # fgraph; we don't change it in the ShapeFeature's internal
        # structure.
assert isinstance(node.op, Shape_i)
replacement = shape_feature.scheduled[node]
return [shape_feature.shape_of[replacement][node.op.i]]
@register_specialize
@register_canonicalize
@gof.local_optimizer([Subtensor])
def local_subtensor_inc_subtensor(node):
"""
Subtensor(SetSubtensor(x, y, idx), idx) -> y
"""
if isinstance(node.op, Subtensor):
x = node.inputs[0]
if not x.owner or not isinstance(x.owner.op, IncSubtensor):
return
if not x.owner.op.set_instead_of_inc:
return
if (x.owner.inputs[2:] == node.inputs[1:] and
tuple(x.owner.op.idx_list) == tuple(node.op.idx_list)):
out = node.outputs[0]
y = x.owner.inputs[1]
# If the dtypes differ, cast y into x.dtype
if x.dtype != y.dtype:
y = y.astype(x.dtype)
if out.type == y.type:
# if x[idx] and y have the same type, directly return y
return [y]
else:
# The difference is related to broadcasting pattern
assert out.broadcastable != y.broadcastable
# We have to alloc y to the shape of x[idx]
x_subtensor = node.op(x.owner.inputs[0], *x.owner.inputs[2:])
return [T.alloc(y, *x_subtensor.shape)]
else:
return
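# Illustrative sketch (not part of the original module) of the
# Subtensor(SetSubtensor(...)) simplification above; the helper name is made
# up and assumes a standard Theano install.
def _example_subtensor_inc_subtensor():
    import theano
    import theano.tensor as T
    x = T.matrix('x')
    y = T.vector('y')
    i = T.iscalar('i')
    # Reading back the exact row that was just set returns the new value.
    z = T.set_subtensor(x[i], y)[i]
    f = theano.function([x, y, i], z)
    # The compiled graph should reduce to y (cast/broadcast if needed)
    # without materializing the updated copy of x.
    theano.printing.debugprint(f)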
@register_specialize
@register_canonicalize
@gof.local_optimizer([Subtensor])
def local_subtensor_remove_broadcastable_index(node):
"""
Remove broadcastable dimension with index 0 or -1
a[:,:,:,0] -> a.dimshuffle(0,1,2), when
a.broadcastable = (False, False, False, True)
a[0,:,-1,:] -> a.dimshuffle(1,3), when
a.broadcastable = (True, False, True, False)
"""
if isinstance(node.op, Subtensor):
idx = node.op.idx_list
else:
return
remove_dim = []
node_inputs_idx = 1
for dim, elem in enumerate(idx):
if isinstance(elem, (scalar.Scalar)):
            # The idx is a Scalar, i.e. a Type. This means the actual index
            # is contained in node.inputs[node_inputs_idx].
dim_index = node.inputs[node_inputs_idx]
if type(dim_index) == theano.scalar.basic.ScalarConstant:
dim_index = dim_index.value
if dim_index in [0, -1] and node.inputs[0].broadcastable[dim]:
remove_dim.append(dim)
node_inputs_idx += 1
else:
return
elif isinstance(elem, slice):
if elem != slice(None):
return
elif isinstance(elem, (integer_types, numpy.integer)):
if elem in [0, -1] and node.inputs[0].broadcastable[dim]:
remove_dim.append(dim)
else:
raise TypeError('case not expected')
if len(remove_dim) == 0:
return
else:
all_dim = range(node.inputs[0].ndim)
remain_dim = [x for x in all_dim if x not in remove_dim]
return [node.inputs[0].dimshuffle(tuple(remain_dim))]
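# Illustrative sketch (not part of the original module) of the rewrite above;
# the helper name is made up and assumes a standard Theano install.
def _example_remove_broadcastable_index():
    import theano
    import theano.tensor as T
    # a has a broadcastable (size 1) last dimension.
    a = T.TensorType(theano.config.floatX, (False, False, False, True))('a')
    b = a[:, :, :, 0]
    f = theano.function([a], b)
    # Indexing a size-1 dimension with 0 (or -1) cannot fail, so the
    # Subtensor should be replaced by a DimShuffle that drops that axis.
    theano.printing.debugprint(f)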
@register_specialize
@register_canonicalize('fast_compile_gpu')
@register_useless
@gof.local_optimizer([Subtensor, AdvancedSubtensor1])
def local_subtensor_make_vector(node):
"""
Replace all subtensor(make_vector) like:
[a,b,c][0] -> a
[a,b,c][0:2] -> [a,b]
Replace all AdvancedSubtensor1(make_vector) like:
[a,b,c][[0,2]] -> [a,c]
We can do this for constant indexes.
"""
x = node.inputs[0]
if not x.owner or x.owner.op != make_vector:
return
if isinstance(node.op, Subtensor):
# This optimization needs ShapeOpt and fgraph.shape_feature
try:
idx, = node.op.idx_list
except Exception:
# 'how can you have multiple indexes into a shape?'
raise
if isinstance(idx, (scalar.Scalar, T.TensorType)):
        # The idx is a Scalar, i.e. a Type. This means the actual index
        # is contained in node.inputs[1].
old_idx, idx = idx, node.inputs[1]
assert idx.type == old_idx
elif isinstance(node.op, AdvancedSubtensor1):
idx = node.inputs[1]
else:
return
if isinstance(idx, (integer_types, numpy.integer)):
# We don't need to copy over any stack traces here
return [x.owner.inputs[idx]]
elif isinstance(idx, Variable):
if idx.ndim == 0:
# if it is a constant we can do something with it
try:
v = get_scalar_constant_value(idx, only_process_constants=True)
if isinstance(v, numpy.integer):
# Python 2.4 wants to index only with Python integers
v = int(v)
# We don't need to copy over any stack traces here
try:
ret = [x.owner.inputs[v]]
except IndexError:
raise NotScalarConstantError("Bad user graph!")
return ret
except NotScalarConstantError:
pass
elif idx.ndim == 1 and isinstance(idx, T.Constant):
values = list(map(int, list(idx.value)))
ret = make_vector(*[x.owner.inputs[v] for v in values])
# Copy over stack trace from previous output to new output
copy_stack_trace(node.outputs[0], ret)
ret = T.patternbroadcast(ret, node.outputs[0].broadcastable)
return [ret]
else:
raise TypeError('case not expected')
elif isinstance(idx, slice):
# it is a slice of ints and/or Variables
# check subtensor to see if it can contain constant variables, and if
# it can, then try to unpack them.
try:
const_slice = node.op.get_constant_idx(node.inputs,
allow_partial=False)[0]
ret = make_vector(*x.owner.inputs[const_slice])
# Copy over stack trace from previous outputs to new output
copy_stack_trace(node.outputs, ret)
ret = T.patternbroadcast(ret, node.outputs[0].broadcastable)
return [ret]
except NotScalarConstantError:
pass
else:
raise TypeError('case not expected')
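# Illustrative sketch (not part of the original module) of the
# subtensor(make_vector) simplification above; the helper name is made up and
# assumes a standard Theano install (T.stack of same-dtype scalars builds a
# MakeVector under the hood).
def _example_subtensor_make_vector():
    import theano
    import theano.tensor as T
    a, b, c = T.scalar('a'), T.scalar('b'), T.scalar('c')
    v = T.stack([a, b, c])
    f = theano.function([a, b, c], v[0])
    # v[0] should simplify straight to a, without ever building the vector.
    theano.printing.debugprint(f)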
# TODO: the other optimization for and, or, xor, le and ge see ticket #496.
@register_useless
@register_canonicalize('fast_compile')
@register_specialize
@gof.local_optimizer([T.Elemwise])
def local_useless_elemwise(node):
"""
eq(x,x) -> 1
neq(x,x) -> 0
mul(x) -> x
add(x) -> x
identity(x) -> x
and(x,1) -> x
and(x,0) -> zeros_like(x)
or(x,0) -> x
or(x,1) -> ones_like(x)
xor(x,x) -> zeros_like(x)
"""
if isinstance(node.op, T.Elemwise):
        # We call zeros_like and ones_like with opt=True to generate a
# cleaner graph.
dtype = node.outputs[0].dtype
if node.op.scalar_op == theano.scalar.eq and len(node.inputs) == 2:
if node.inputs[0] == node.inputs[1]:
# it is the same var in the graph. That will always be true
ret = T.ones_like(node.inputs[0], dtype=dtype, opt=True)
# Copy stack trace from input to constant output
copy_stack_trace(node.outputs[0], ret)
return [ret]
elif node.op.scalar_op == theano.scalar.neq and len(node.inputs) == 2:
if node.inputs[0] == node.inputs[1]:
# it is the same var in the graph. That will always be false
ret = T.zeros_like(node.inputs[0], dtype=dtype, opt=True)
# Copy stack trace from input to constant output
copy_stack_trace(node.outputs[0], ret)
return [ret]
elif node.op.scalar_op == theano.scalar.mul and len(node.inputs) == 1:
# No need to copy over any stack trace
return [node.inputs[0]]
elif node.op.scalar_op == theano.scalar.add and len(node.inputs) == 1:
# No need to copy over any stack trace
return [node.inputs[0]]
elif (node.op.scalar_op == theano.scalar.identity and
len(node.inputs) == 1):
return [node.inputs[0]]
elif (isinstance(node.op.scalar_op, scalar.AND) and
len(node.inputs) == 2):
if isinstance(node.inputs[0], T.TensorConstant):
const_val = T.extract_constant(node.inputs[0], only_process_constants=True)
if not isinstance(const_val, Variable):
if const_val == 0:
return [T.zeros_like(node.inputs[1], dtype=dtype,
opt=True)]
else:
return [node.inputs[1].astype(node.outputs[0].dtype)]
if isinstance(node.inputs[1], T.TensorConstant):
const_val = T.extract_constant(node.inputs[1], only_process_constants=True)
if not isinstance(const_val, Variable):
if const_val == 0:
return [T.zeros_like(node.inputs[0], dtype=dtype,
opt=True)]
else:
return [node.inputs[0].astype(node.outputs[0].dtype)]
elif (isinstance(node.op.scalar_op, scalar.OR) and
len(node.inputs) == 2):
if isinstance(node.inputs[0], T.TensorConstant):
const_val = T.extract_constant(node.inputs[0], only_process_constants=True)
if not isinstance(const_val, Variable):
if const_val == 0:
return [node.inputs[1].astype(node.outputs[0].dtype)]
else:
return [T.ones_like(node.inputs[1], dtype=dtype,
opt=True)]
if isinstance(node.inputs[1], T.TensorConstant):
const_val = T.extract_constant(node.inputs[1], only_process_constants=True)
if not isinstance(const_val, Variable):
if const_val == 0:
return [node.inputs[0].astype(node.outputs[0].dtype)]
else:
return [T.ones_like(node.inputs[0], dtype=dtype,
opt=True)]
elif (isinstance(node.op.scalar_op, scalar.XOR) and
len(node.inputs) == 2):
if node.inputs[0] is node.inputs[1]:
return [T.zeros_like(node.inputs[0], dtype=dtype, opt=True)]
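# Illustrative sketch (not part of the original module) of one case handled
# above; the helper name is made up and assumes a standard Theano install.
def _example_local_useless_elemwise():
    import theano
    import theano.tensor as T
    x = T.vector('x')
    f = theano.function([x], T.eq(x, x))
    # eq(x, x) is always true, so the compiled graph should just build a
    # vector of ones with x's shape instead of comparing element-wise.
    theano.printing.debugprint(f)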
@register_specialize
@gof.local_optimizer([T.Elemwise])
def local_alloc_unary(node):
"""unary(alloc(x, shp)) -> alloc(unary(x), shp)"""
if isinstance(node.op, T.Elemwise) and len(node.inputs) == 1:
a = node.inputs[0]
if a.owner and isinstance(a.owner.op, T.Alloc):
x = a.owner.inputs[0]
shp = a.owner.inputs[1:]
v = node.op(x)
# T.alloc does not preserve the stacktrace of v,
# so we need to copy it over from x.
copy_stack_trace(node.outputs[0], v)
ret = T.alloc(T.cast(v, node.outputs[0].dtype), *shp)
# T.cast does not preserve the stacktrace of x,
# so we need to copy it over to the output.
copy_stack_trace([node.outputs[0], a], ret)
return [ret]
@register_canonicalize
@register_specialize
@gof.local_optimizer([T.Elemwise])
def local_cast_cast(node):
"""cast(cast(x, dtype1), dtype2)
when those contrain:
dtype1 == dtype2
TODO: the base dtype is the same (int, uint, float, complex)
and the first cast cause an upcast.
"""
if (not isinstance(node.op, T.Elemwise) or
not isinstance(node.op.scalar_op, scalar.Cast)):
return
x = node.inputs[0]
if (not x.owner or
not isinstance(x.owner.op, T.Elemwise) or
not isinstance(x.owner.op.scalar_op, scalar.Cast)):
return
if node.op.scalar_op.o_type == x.owner.op.scalar_op.o_type:
# We don't need to copy over any stack traces here
return [x]
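# Illustrative sketch (not part of the original module) of the cast-of-cast
# simplification above; the helper name is made up and assumes a standard
# Theano install.
def _example_local_cast_cast():
    import theano
    import theano.tensor as T
    x = T.matrix('x', dtype='float64')
    z = T.cast(T.cast(x, 'float32'), 'float32')
    f = theano.function([x], z)
    # Both casts target the same dtype, so the outer one is redundant and
    # should be removed, leaving a single float64 -> float32 cast.
    theano.printing.debugprint(f)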
@register_canonicalize
@register_specialize
@gof.local_optimizer([T.Elemwise])
def local_func_inv(node):
"""
Check for two consecutive operations that are functional inverses
and remove them from the function graph.
"""
inv_pairs = (
(basic.Deg2Rad, basic.Rad2Deg),
(basic.Cosh, basic.ArcCosh),
(basic.Tanh, basic.ArcTanh),
(basic.Sinh, basic.ArcSinh),
(basic.Conj, basic.Conj),
(basic.Neg, basic.Neg),
(basic.Inv, basic.Inv),
)
x = node.inputs[0]
if not isinstance(node.op, T.Elemwise):
return
if (not x.owner or not isinstance(x.owner.op, T.Elemwise)):
return
prev_op = x.owner.op.scalar_op
node_op = node.op.scalar_op
for inv_pair in inv_pairs:
if is_inverse_pair(node_op, prev_op, inv_pair):
# We don't need to copy stack trace, because the optimization
# is trivial and maintains the earlier stack trace
return x.owner.inputs
return
def is_inverse_pair(node_op, prev_op, inv_pair):
"""
Given two consecutive operations, check if they are the
provided pair of inverse functions.
"""
node_is_op0 = isinstance(node_op, inv_pair[0])
node_is_op1 = isinstance(node_op, inv_pair[1])
prev_is_op0 = isinstance(prev_op, inv_pair[0])
prev_is_op1 = isinstance(prev_op, inv_pair[1])
return (node_is_op0 and prev_is_op1) or (node_is_op1 and prev_is_op0)
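# Illustrative sketch (not part of the original module) of an inverse pair
# being removed by local_func_inv above; the helper name is made up and
# assumes a standard Theano install.
def _example_local_func_inv():
    import theano
    import theano.tensor as T
    x = T.vector('x')
    f = theano.function([x], T.rad2deg(T.deg2rad(x)))
    # Deg2Rad followed by Rad2Deg is listed as an inverse pair, so the
    # compiled graph should return x directly instead of converting twice.
    theano.printing.debugprint(f)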
class Assert(T.Op):
"""
Implements assertion in a computational graph.
Returns the first parameter if the condition is true, otherwise, triggers
AssertionError.
Notes
-----
    This Op is a debugging feature. It can be removed from the graph
    because of optimizations, and can hide some possible optimizations from
    the optimizer. Specifically, it is removed if it can be determined
    that the condition will always be true. Also, the output of the Op must
    be used in the function computing the graph, but it doesn't have to be
    returned.
Examples
--------
>>> import theano
>>> T = theano.tensor
>>> x = T.vector('x')
>>> assert_op = T.opt.Assert()
>>> func = theano.function([x], assert_op(x, x.size<2))
"""
_f16_ok = True
__props__ = ('msg',)
view_map = {0: [0]}
check_input = False
def __init__(self, msg="Theano Assert failed!"):
self.msg = msg
def __setstate__(self, attrs):
self.__dict__.update(attrs)
if not hasattr(self, 'msg'):
self.msg = "Theano Assert failed!"
def make_node(self, value, *conds):
if not isinstance(value, Variable):
value = T.as_tensor_variable(value)
cond = [T.as_tensor_variable(c) for c in conds]
assert numpy.all([c.type.ndim == 0 for c in cond])
return gof.Apply(self, [value] + cond, [value.type()])
def perform(self, node, inputs, out_):
out, = out_
v = inputs[0]
out[0] = v
assert numpy.all(inputs[1:]), self.msg
def grad(self, input, output_gradients):
return output_gradients + [DisconnectedType()()] * (len(input) - 1)
def connection_pattern(self, node):
return [[1]] + [[0]] * (len(node.inputs) - 1)
def c_code(self, node, name, inames, onames, sub):
value = inames[0]
out = onames[0]
check = []
fail = sub['fail']
msg = self.msg.replace('"', '\\"').replace('\n', '\\n')
for idx in xrange(len(inames) - 1):
i = inames[idx + 1]
dtype = node.inputs[idx + 1].dtype
check.append('if(!((npy_%(dtype)s*)PyArray_DATA(%(i)s))[0])'
'{PyErr_SetString(PyExc_AssertionError,"%(msg)s");'
'%(fail)s}' % locals())
check = "\n".join(check)
return """
%(check)s
Py_XDECREF(%(out)s);
%(out)s = %(value)s;
Py_INCREF(%(value)s);
""" % locals()
def c_code_cache_version(self):
return (3, 0)
def infer_shape(self, node, input_shapes):
return [input_shapes[0]]
assert_ = Assert()
# unittest.assert_ is a deprecated name for assertTrue, so 2to3 converts
# theano.tensor.opt.assert_ to theano.tensor.opt.assertTrue.
# We define a new name as a workaround.
assert_op = assert_
@register_specialize
@gof.local_optimizer([Assert])
def local_remove_useless_assert(node):
if isinstance(node.op, Assert):
cond = []
for c in node.inputs[1:]:
try:
const = get_scalar_constant_value(c)
if 0 != const.ndim or const == 0:
                    # Should we raise an error here? How can we be sure it
                    # is not caught?
cond.append(c)
except NotScalarConstantError:
cond.append(c)
if len(cond) == 0:
# We don't need to copy over any stack traces here
return [node.inputs[0]]
if len(cond) != len(node.inputs) - 1:
ret = assert_(node.inputs[0], *cond)
# We copy over stack trace from the output of the original assert
copy_stack_trace(node.outputs[0], ret)
return [ret]
@gof.local_optimizer([Assert])
def local_remove_all_assert(node):
"""An optimization disabled by default that removes all asserts from
the graph.
Notes
-----
See the :ref:`unsafe` section to know how to enable it.
"""
if not isinstance(node.op, Assert):
return
# We don't need to copy over any stack traces here
return [node.inputs[0]]
# Disabled by default
compile.optdb['canonicalize'].register('local_remove_all_assert',
local_remove_all_assert,
'unsafe',
use_db_name_as_tag=False)
compile.optdb['stabilize'].register('local_remove_all_assert',
local_remove_all_assert,
'unsafe',
use_db_name_as_tag=False)
compile.optdb['specialize'].register('local_remove_all_assert',
local_remove_all_assert,
'unsafe',
use_db_name_as_tag=False)
compile.optdb['useless'].register('local_remove_all_assert',
local_remove_all_assert,
'unsafe',
use_db_name_as_tag=False)
#######################
# Constant Canonicalization
############################
@register_canonicalize
@gof.local_optimizer([T.Elemwise])
def local_upcast_elemwise_constant_inputs(node):
"""This explicitly upcasts constant inputs to elemwise Ops, when
those Ops do implicit upcasting anyway.
Rationale: it helps merge things like (1-x) and (1.0 - x).
"""
if len(node.outputs) > 1:
return
try:
shape_i = node.fgraph.shape_feature.shape_i
except AttributeError:
shape_i = None
if isinstance(node.op, T.Elemwise):
scalar_op = node.op.scalar_op
# print "aa", scalar_op.output_types_preference
if (getattr(scalar_op, 'output_types_preference', None)
in (T.scal.upgrade_to_float, T.scal.upcast_out)):
            # this is the kind of op whose input dtypes we can safely
            # change by upcasting explicitly
output_dtype = node.outputs[0].type.dtype
new_inputs = []
for i in node.inputs:
if i.type.dtype == output_dtype:
new_inputs.append(i)
else:
try:
# works only for scalars
cval_i = get_scalar_constant_value(i,
only_process_constants=True)
if all(i.broadcastable):
new_inputs.append(T.shape_padleft(
T.cast(cval_i, output_dtype),
i.ndim))
else:
if shape_i is None:
return
new_inputs.append(
T.alloc(T.cast(cval_i, output_dtype),
*[shape_i(d)(i)
for d in xrange(i.ndim)]))
# print >> sys.stderr, "AAA",
# *[Shape_i(d)(i) for d in xrange(i.ndim)]
except NotScalarConstantError:
# for the case of a non-scalar
if isinstance(i, T.TensorConstant):
new_inputs.append(T.cast(i, output_dtype))
else:
new_inputs.append(i)
if new_inputs != node.inputs:
rval = [node.op(*new_inputs)]
if rval[0].type != node.outputs[0].type:
                    # This can happen for example when floatX=float32
                    # and we do the true division between an int64
                    # and a constant that will get typed as int8.
                    # As this is just to allow merging more cases, if
                    # the upcast doesn't work, we can just skip it.
return
# Copy over output stacktrace from before upcasting
copy_stack_trace(node.outputs[0], rval)
return rval
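# Illustrative sketch (not part of the original module) of why the upcast
# above helps; the helper name is made up and assumes a standard Theano
# install.
def _example_upcast_constant_inputs():
    import theano
    import theano.tensor as T
    x = T.fmatrix('x')
    # Once the constants are explicitly upcast to the output dtype, the two
    # subtractions become identical subgraphs that the merge optimizer can
    # collapse into one.
    out = (1 - x) + (1.0 - x)
    f = theano.function([x], out)
    theano.printing.debugprint(f)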
##################
# Subtensor opts #
##################
@register_useless
@register_canonicalize
@register_specialize
@gof.local_optimizer([IncSubtensor])
def local_useless_inc_subtensor(node):
"""
    Remove IncSubtensor, when we overwrite the full input with the
new value.
"""
if not isinstance(node.op, IncSubtensor):
return
if node.op.set_instead_of_inc is False:
# This is an IncSubtensor, so the init value must be zeros
try:
c = get_scalar_constant_value(node.inputs[0],
only_process_constants=True)
if c != 0:
return
except NotScalarConstantError:
return
if (node.inputs[0].ndim != node.inputs[1].ndim or
node.inputs[0].broadcastable != node.inputs[1].broadcastable):
        # FB: I didn't check if this case can happen, but this opt
        # doesn't support it.
return
# We have a SetSubtensor or an IncSubtensor on zeros
    # Is this IncSubtensor useful?
# Check that we keep all the original data.
# Put the constant inputs in the slice.
idx_cst = get_idx_list(node.inputs[1:], node.op.idx_list)
if all(isinstance(e, slice) and e.start is None and
e.stop is None and (e.step is None or T.extract_constant(e.step,
only_process_constants=True) == -1)
for e in idx_cst):
        # IncSubtensor broadcasts node.inputs[1] on node.inputs[0]
# based on run time shapes, so we must check they are the same.
if not hasattr(node.fgraph, 'shape_feature'):
return
if not node.fgraph.shape_feature.same_shape(node.inputs[0],
node.inputs[1]):
return
# There is no reverse, so we don't need a replacement.
if all(e.step is None
for e in node.op.idx_list):
            # They are the same shape, so we can remove this IncSubtensor.
return [node.inputs[1]]
ret = Subtensor(node.op.idx_list)(*node.inputs[1:])
# Copy over previous output stacktrace
copy_stack_trace(node.outputs, ret)
return [ret]
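# Illustrative sketch (not part of the original module) of a useless
# SetSubtensor being removed by the optimization above; the helper name is
# made up and assumes a standard Theano install.
def _example_local_useless_inc_subtensor():
    import theano
    import theano.tensor as T
    x = T.matrix('x')
    # x is fully overwritten by 2 * x, which is known to have the same shape,
    # so the SetSubtensor should be removed and f just computes 2 * x.
    z = T.set_subtensor(x[:, :], 2 * x)
    f = theano.function([x], z)
    theano.printing.debugprint(f)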
@register_canonicalize
@gof.local_optimizer([AdvancedIncSubtensor1])
def local_set_to_inc_subtensor(node):
"""
AdvancedIncSubtensor1(x, x[ilist]+other, ilist, set_instead_of_inc=True) ->
AdvancedIncSubtensor1(x, other, ilist, set_instead_of_inc=False)
"""
if (isinstance(node.op, AdvancedIncSubtensor1) and
node.op.set_instead_of_inc and
node.inputs[1].owner and
isinstance(node.inputs[1].owner.op, Elemwise) and
isinstance(node.inputs[1].owner.op.scalar_op, scalar.Add)):
addn = node.inputs[1].owner
subn = None
other = None
if (addn.inputs[0].owner and
isinstance(addn.inputs[0].owner.op, AdvancedSubtensor1)):
subn = addn.inputs[0].owner
other = addn.inputs[1]
elif (addn.inputs[1].owner and
isinstance(addn.inputs[1].owner.op, AdvancedSubtensor1)):
subn = addn.inputs[1].owner
other = addn.inputs[0]
else:
return
if (subn.inputs[1] != node.inputs[2] or
subn.inputs[0] != node.inputs[0]):
return
ret = advanced_inc_subtensor1(node.inputs[0], other, node.inputs[2])
# Copy over previous output stacktrace
# Julian: I'm not sure about this at all...
copy_stack_trace(node.outputs, ret)
return [ret]
@register_useless
@register_canonicalize
@register_specialize
@gof.local_optimizer([Subtensor])
def local_useless_slice(node):
"""
Remove Subtensor of the form X[0, :] -> X[0]
"""
if isinstance(node.op, Subtensor):
slices = get_idx_list(node.inputs, node.op.idx_list)
last_slice = len(slices)
for s in slices[::-1]:
# check if slice and then check slice indices
if (isinstance(s, slice) and s.start is None and s.stop is None and
(s.step is None or T.extract_constant(s.step,
only_process_constants=True) == 1)):
last_slice -= 1
else:
break
# check if we removed something
if last_slice < len(slices):
subtens = Subtensor(slices[:last_slice])
sl_ins = Subtensor.collapse(slices[:last_slice],
lambda x: isinstance(x, T.Variable))
out = subtens(node.inputs[0], *sl_ins)
# Copy over previous output stacktrace
copy_stack_trace(node.outputs, out)
return [out]
@register_canonicalize
@register_specialize
@gof.local_optimizer([Subtensor, AdvancedSubtensor1])
def local_useless_subtensor(node):
"""
Remove Subtensor/AdvancedSubtensor1 if it takes the full input. In the
AdvancedSubtensor1 case, the full input is taken when the indices are
equivalent to `arange(0, input.shape[0], 1)` using either an explicit
list/vector or the ARange op.
"""
    # If the optimization is tried on a node that is not yet part of a graph
if not hasattr(node, 'fgraph'):
return
# This optimization needs ShapeOpt and fgraph.shape_feature
if not hasattr(node.fgraph, 'shape_feature'):
return
shape_of = node.fgraph.shape_feature.shape_of
if isinstance(node.op, Subtensor):
cdata = node.op.get_constant_idx(node.inputs, allow_partial=True,
only_process_constants=True)
| answers: for pos, idx in enumerate(cdata): | length: 10,029 | dataset: lcc_e | language: python | all_classes: null | _id: 8152cfa361486a6a6f2953cf6740d89650cb310719efcbb1 |
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.bridge', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
typehandlers.add_type_alias('std::list< ns3::AttributeConstructionList::Item > const_iterator', 'ns3::AttributeConstructionList::CIterator')
typehandlers.add_type_alias('std::list< ns3::AttributeConstructionList::Item > const_iterator*', 'ns3::AttributeConstructionList::CIterator*')
typehandlers.add_type_alias('std::list< ns3::AttributeConstructionList::Item > const_iterator&', 'ns3::AttributeConstructionList::CIterator&')
## bridge-helper.h (module 'bridge'): ns3::BridgeHelper [class]
module.add_class('BridgeHelper')
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeChecker'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeValue'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::NixVector> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::NixVector'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor'])
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
module.add_class('Mac48Address', import_from_module='ns.network')
typehandlers.add_type_alias('void ( * ) ( ns3::Mac48Address )', 'ns3::Mac48Address::TracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Mac48Address )*', 'ns3::Mac48Address::TracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Mac48Address )&', 'ns3::Mac48Address::TracedCallback&')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
## mac8-address.h (module 'network'): ns3::Mac8Address [class]
module.add_class('Mac8Address', import_from_module='ns.network')
## mac8-address.h (module 'network'): ns3::Mac8Address [class]
root_module['ns3::Mac8Address'].implicitly_converts_to(root_module['ns3::Address'])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class]
module.add_class('NetDeviceContainer', import_from_module='ns.network')
typehandlers.add_type_alias('std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator', 'ns3::NetDeviceContainer::Iterator')
typehandlers.add_type_alias('std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator*', 'ns3::NetDeviceContainer::Iterator*')
typehandlers.add_type_alias('std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator&', 'ns3::NetDeviceContainer::Iterator&')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', import_from_module='ns.core', allow_subclassing=True)
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_from_module='ns.core')
## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
module.add_class('PacketMetadata', import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::ItemType [enumeration]
module.add_enum('ItemType', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet.h (module 'network'): ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
module.add_class('PacketTagList', import_from_module='ns.network')
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), automatic_type_narrowing=True, parent=root_module['ns3::ObjectBase'], template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'])
## tag.h (module 'network'): ns3::Tag [class]
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
module.add_class('TagBuffer', import_from_module='ns.network')
## nstime.h (module 'core'): ns3::TimeWithUnit [class]
module.add_class('TimeWithUnit', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::SupportLevel [enumeration]
module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
typehandlers.add_type_alias('uint32_t', 'ns3::TypeId::hash_t')
typehandlers.add_type_alias('uint32_t*', 'ns3::TypeId::hash_t*')
typehandlers.add_type_alias('uint32_t&', 'ns3::TypeId::hash_t&')
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## int64x64-128.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t', import_from_module='ns.core')
## int64x64-128.h (module 'core'): ns3::int64x64_t::impl_type [enumeration]
module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
## chunk.h (module 'network'): ns3::Chunk [class]
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## header.h (module 'network'): ns3::Header [class]
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), automatic_type_narrowing=True, parent=root_module['ns3::empty'], template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), automatic_type_narrowing=True, parent=root_module['ns3::empty'], template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), automatic_type_narrowing=True, parent=root_module['ns3::empty'], template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), automatic_type_narrowing=True, parent=root_module['ns3::empty'], template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
module.add_class('SimpleRefCount', import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), automatic_type_narrowing=True, parent=root_module['ns3::empty'], template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
module.add_class('SimpleRefCount', import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), automatic_type_narrowing=True, parent=root_module['ns3::empty'], template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
module.add_class('SimpleRefCount', import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), automatic_type_narrowing=True, parent=root_module['ns3::empty'], template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), automatic_type_narrowing=True, parent=root_module['ns3::empty'], template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'])
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time', import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
typehandlers.add_type_alias('void ( * ) ( ns3::Time )', 'ns3::Time::TracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Time )*', 'ns3::Time::TracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Time )&', 'ns3::Time::TracedCallback&')
## nstime.h (module 'core'): ns3::Time [class]
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## trailer.h (module 'network'): ns3::Trailer [class]
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', import_from_module='ns.core', automatic_type_narrowing=True, allow_subclassing=False, parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', import_from_module='ns.core', automatic_type_narrowing=True, allow_subclassing=False, parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## channel.h (module 'network'): ns3::Channel [class]
module.add_class('Channel', import_from_module='ns.network', parent=root_module['ns3::Object'])
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor [class]
module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
## attribute.h (module 'core'): ns3::EmptyAttributeChecker [class]
module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class]
module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class]
module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## net-device.h (module 'network'): ns3::NetDevice [class]
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
typehandlers.add_type_alias('void ( * ) ( )', 'ns3::NetDevice::LinkChangeTracedCallback')
typehandlers.add_type_alias('void ( * ) ( )*', 'ns3::NetDevice::LinkChangeTracedCallback*')
typehandlers.add_type_alias('void ( * ) ( )&', 'ns3::NetDevice::LinkChangeTracedCallback&')
typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ns3::NetDevice::ReceiveCallback')
typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', 'ns3::NetDevice::ReceiveCallback*')
typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', 'ns3::NetDevice::ReceiveCallback&')
typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'ns3::NetDevice::PromiscReceiveCallback')
typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >*', 'ns3::NetDevice::PromiscReceiveCallback*')
typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >&', 'ns3::NetDevice::PromiscReceiveCallback&')
## nix-vector.h (module 'network'): ns3::NixVector [class]
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## packet.h (module 'network'): ns3::Packet [class]
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const > )', 'ns3::Packet::TracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const > )*', 'ns3::Packet::TracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const > )&', 'ns3::Packet::TracedCallback&')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )', 'ns3::Packet::AddressTracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )*', 'ns3::Packet::AddressTracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )&', 'ns3::Packet::AddressTracedCallback&')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )', 'ns3::Packet::TwoAddressTracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )*', 'ns3::Packet::TwoAddressTracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )&', 'ns3::Packet::TwoAddressTracedCallback&')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )', 'ns3::Packet::Mac48AddressTracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )*', 'ns3::Packet::Mac48AddressTracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )&', 'ns3::Packet::Mac48AddressTracedCallback&')
typehandlers.add_type_alias('void ( * ) ( uint32_t, uint32_t )', 'ns3::Packet::SizeTracedCallback')
typehandlers.add_type_alias('void ( * ) ( uint32_t, uint32_t )*', 'ns3::Packet::SizeTracedCallback*')
typehandlers.add_type_alias('void ( * ) ( uint32_t, uint32_t )&', 'ns3::Packet::SizeTracedCallback&')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, double )', 'ns3::Packet::SinrTracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, double )*', 'ns3::Packet::SinrTracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, double )&', 'ns3::Packet::SinrTracedCallback&')
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## address.h (module 'network'): ns3::AddressChecker [class]
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## address.h (module 'network'): ns3::AddressValue [class]
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## bridge-channel.h (module 'bridge'): ns3::BridgeChannel [class]
module.add_class('BridgeChannel', parent=root_module['ns3::Channel'])
## bridge-net-device.h (module 'bridge'): ns3::BridgeNetDevice [class]
module.add_class('BridgeNetDevice', parent=root_module['ns3::NetDevice'])
## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'])
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace Hash
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
## Register a nested module for the namespace TracedValueCallback
nested_module = module.add_cpp_namespace('TracedValueCallback')
register_types_ns3_TracedValueCallback(nested_module)
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_Hash(module):
root_module = module.get_root()
## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
typehandlers.add_type_alias('uint32_t ( * ) ( char const *, std::size_t const )', 'ns3::Hash::Hash32Function_ptr')
typehandlers.add_type_alias('uint32_t ( * ) ( char const *, std::size_t const )*', 'ns3::Hash::Hash32Function_ptr*')
typehandlers.add_type_alias('uint32_t ( * ) ( char const *, std::size_t const )&', 'ns3::Hash::Hash32Function_ptr&')
typehandlers.add_type_alias('uint64_t ( * ) ( char const *, std::size_t const )', 'ns3::Hash::Hash64Function_ptr')
typehandlers.add_type_alias('uint64_t ( * ) ( char const *, std::size_t const )*', 'ns3::Hash::Hash64Function_ptr*')
typehandlers.add_type_alias('uint64_t ( * ) ( char const *, std::size_t const )&', 'ns3::Hash::Hash64Function_ptr&')
## Register a nested module for the namespace Function
nested_module = module.add_cpp_namespace('Function')
register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
root_module = module.get_root()
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
def register_types_ns3_TracedValueCallback(module):
root_module = module.get_root()
typehandlers.add_type_alias('void ( * ) ( ns3::Time, ns3::Time )', 'ns3::TracedValueCallback::Time')
typehandlers.add_type_alias('void ( * ) ( ns3::Time, ns3::Time )*', 'ns3::TracedValueCallback::Time*')
typehandlers.add_type_alias('void ( * ) ( ns3::Time, ns3::Time )&', 'ns3::TracedValueCallback::Time&')
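## The typedef aliases above follow a fixed pattern: each callback or function-pointer
## typedef is registered three times -- the plain signature, its pointer ('*') form and
## its reference ('&') form -- so pybindgen can resolve the alias however it appears in
## a wrapped method signature. The first argument is the full C++ type, the second is
## the typedef name it stands for.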
def register_methods(root_module):
register_Ns3Address_methods(root_module, root_module['ns3::Address'])
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3BridgeHelper_methods(root_module, root_module['ns3::BridgeHelper'])
register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeAccessor >'])
register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeChecker >'])
register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeValue >'])
register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, root_module['ns3::DefaultDeleter< ns3::CallbackImplBase >'])
register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Hash::Implementation >'])
register_Ns3DefaultDeleter__Ns3NixVector_methods(root_module, root_module['ns3::DefaultDeleter< ns3::NixVector >'])
register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::TraceSourceAccessor >'])
register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
register_Ns3Mac8Address_methods(root_module, root_module['ns3::Mac8Address'])
register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
register_Ns3Header_methods(root_module, root_module['ns3::Header'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3Channel_methods(root_module, root_module['ns3::Channel'])
register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
register_Ns3BridgeChannel_methods(root_module, root_module['ns3::BridgeChannel'])
register_Ns3BridgeNetDevice_methods(root_module, root_module['ns3::BridgeNetDevice'])
register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
return
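## register_methods() is a plain dispatcher: one call per class added during type
## registration, listed in the same base-before-derived order (ns3::Channel before
## ns3::BridgeChannel, ns3::NetDevice before ns3::BridgeNetDevice, and so on). Each
## helper below only attaches constructors, methods and attributes to a wrapper class
## that already exists.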
def register_Ns3Address_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
cls.add_output_stream_operator()
## address.h (module 'network'): ns3::Address::Address() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [constructor]
cls.add_constructor([param('ns3::Address const &', 'address')])
## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
cls.add_method('CheckCompatible',
'bool',
[param('uint8_t', 'type'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyAllFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
cls.add_method('CopyAllTo',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'uint32_t',
[param('uint8_t *', 'buffer')],
is_const=True)
## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'buffer')])
## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
cls.add_method('GetLength',
'uint8_t',
[],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
cls.add_method('IsInvalid',
'bool',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
cls.add_method('IsMatchingType',
'bool',
[param('uint8_t', 'type')],
is_const=True)
## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
cls.add_method('Register',
'uint8_t',
[],
is_static=True)
## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'buffer')],
is_const=True)
return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::CIterator ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'ns3::AttributeConstructionList::CIterator',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::CIterator ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'ns3::AttributeConstructionList::CIterator',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
def register_Ns3BridgeHelper_methods(root_module, cls):
## bridge-helper.h (module 'bridge'): ns3::BridgeHelper::BridgeHelper(ns3::BridgeHelper const & arg0) [constructor]
cls.add_constructor([param('ns3::BridgeHelper const &', 'arg0')])
## bridge-helper.h (module 'bridge'): ns3::BridgeHelper::BridgeHelper() [constructor]
cls.add_constructor([])
## bridge-helper.h (module 'bridge'): ns3::NetDeviceContainer ns3::BridgeHelper::Install(ns3::Ptr<ns3::Node> node, ns3::NetDeviceContainer c) [member function]
cls.add_method('Install',
'ns3::NetDeviceContainer',
[param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::NetDeviceContainer', 'c')])
## bridge-helper.h (module 'bridge'): ns3::NetDeviceContainer ns3::BridgeHelper::Install(std::string nodeName, ns3::NetDeviceContainer c) [member function]
cls.add_method('Install',
'ns3::NetDeviceContainer',
[param('std::string', 'nodeName'), param('ns3::NetDeviceContainer', 'c')])
## bridge-helper.h (module 'bridge'): void ns3::BridgeHelper::SetDeviceAttribute(std::string n1, ns3::AttributeValue const & v1) [member function]
cls.add_method('SetDeviceAttribute',
'void',
[param('std::string', 'n1'), param('ns3::AttributeValue const &', 'v1')])
return
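## A minimal usage sketch for the BridgeHelper wrapper registered above, assuming the
## generated modules are importable as ns.core / ns.network / ns.bridge (the layout of
## a standard ns-3 Python build); 'devices' stands for any NetDeviceContainer the
## caller has already filled, and the 'Mtu' attribute is only an illustration:
##
##   import ns.core
##   import ns.network
##   import ns.bridge
##
##   node = ns.network.Node()                      # node that will host the bridge
##   helper = ns.bridge.BridgeHelper()
##   helper.SetDeviceAttribute('Mtu', ns.core.UintegerValue(1500))
##   bridged = helper.Install(node, devices)       # returns a NetDeviceContainer
##
## Both Install() overloads return the container holding the newly created
## BridgeNetDevice, mirroring the C++ helper.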
def register_Ns3Buffer_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [constructor]
cls.add_constructor([param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(uint32_t end) [member function]
cls.add_method('AddAtEnd',
'void',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtStart(uint32_t start) [member function]
cls.add_method('AddAtStart',
'void',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
cls.add_method('Begin',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Buffer',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
cls.add_method('End',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3BufferIterator_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [constructor]
cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
cls.add_method('GetDistanceFrom',
'uint32_t',
[param('ns3::Buffer::Iterator const &', 'o')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetRemainingSize() const [member function]
cls.add_method('GetRemainingSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
cls.add_method('IsEnd',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
cls.add_method('IsStart',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
cls.add_method('Next',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
cls.add_method('Next',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::PeekU8() [member function]
cls.add_method('PeekU8',
'uint8_t',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
cls.add_method('Prev',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
cls.add_method('Prev',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(ns3::Buffer::Iterator start, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
cls.add_method('ReadLsbtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
cls.add_method('ReadLsbtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
cls.add_method('ReadLsbtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
cls.add_method('ReadNtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
cls.add_method('ReadNtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
cls.add_method('ReadNtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Write',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
cls.add_method('WriteHtolsbU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
cls.add_method('WriteHtolsbU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
cls.add_method('WriteHtolsbU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
cls.add_method('WriteHtonU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
cls.add_method('WriteHtonU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
cls.add_method('WriteHtonU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data'), param('uint32_t', 'len')])
return
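## Naming convention carried over from ns3::Buffer::Iterator for the read/write helpers
## above: the Ntoh/Hton variants convert to and from network (big-endian) byte order,
## the Lsbtoh/Htolsb variants use little-endian order, and the bare ReadU*/WriteU*
## forms perform no byte-order conversion at all.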
def register_Ns3ByteTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [constructor]
cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagIterator::Item',
[])
return
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
cls.add_method('GetEnd',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
cls.add_method('GetStart',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3ByteTagList_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
cls.add_constructor([])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [constructor]
cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
cls.add_method('Add',
'ns3::TagBuffer',
[param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
cls.add_method('Add',
'void',
[param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t appendOffset) [member function]
cls.add_method('AddAtEnd',
'void',
[param('int32_t', 'appendOffset')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t prependOffset) [member function]
cls.add_method('AddAtStart',
'void',
[param('int32_t', 'prependOffset')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Adjust(int32_t adjustment) [member function]
cls.add_method('Adjust',
'void',
[param('int32_t', 'adjustment')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
cls.add_method('Begin',
'ns3::ByteTagList::Iterator',
[param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')],
is_const=True)
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
cls.add_method('GetOffsetStart',
'uint32_t',
[],
is_const=True)
## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagList::Iterator::Item',
[])
return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
cls.add_constructor([param('ns3::TagBuffer', 'buf')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
cls.add_instance_attribute('end', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
cls.add_instance_attribute('size', 'uint32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
cls.add_instance_attribute('start', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
return
def register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeAccessor> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeAccessor > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeAccessor>::Delete(ns3::AttributeAccessor * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::AttributeAccessor *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeChecker> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeChecker > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeChecker>::Delete(ns3::AttributeChecker * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::AttributeChecker *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeValue> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeValue > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeValue>::Delete(ns3::AttributeValue * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::AttributeValue *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase>::DefaultDeleter(ns3::DefaultDeleter<ns3::CallbackImplBase> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::CallbackImplBase > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::CallbackImplBase>::Delete(ns3::CallbackImplBase * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::CallbackImplBase *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation>::DefaultDeleter(ns3::DefaultDeleter<ns3::Hash::Implementation> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::Hash::Implementation > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::Hash::Implementation>::Delete(ns3::Hash::Implementation * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Hash::Implementation *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3NixVector_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::NixVector>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::NixVector>::DefaultDeleter(ns3::DefaultDeleter<ns3::NixVector> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::NixVector > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::NixVector>::Delete(ns3::NixVector * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::NixVector *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor>::DefaultDeleter(ns3::DefaultDeleter<ns3::TraceSourceAccessor> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::TraceSourceAccessor > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::TraceSourceAccessor>::Delete(ns3::TraceSourceAccessor * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::TraceSourceAccessor *', 'object')],
is_static=True)
return
def register_Ns3Hasher_methods(root_module, cls):
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [constructor]
cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
cls.add_constructor([])
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('std::string const', 's')])
## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
cls.add_method('clear',
'ns3::Hasher &',
[])
return
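## ns3::Hasher, as wrapped above, is incremental: repeated GetHash32()/GetHash64()
## calls fold additional data into the same running digest, and clear() resets the
## underlying ns3::Hash::Implementation, returning the Hasher itself so calls can be
## chained.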
def register_Ns3Ipv4Address_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor]
cls.add_constructor([param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('CombineMask',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv4Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv4Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('GetSubnetDirectedBroadcast',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsAny() const [member function]
cls.add_method('IsAny',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Address const &', 'other')],
is_const=True, deprecated=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function]
cls.add_method('IsLocalMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalhost() const [member function]
cls.add_method('IsLocalhost',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('IsSubnetDirectedBroadcast',
'bool',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
return
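## IsEqual() is exported with deprecated=True because the class also registers the
## '==', '!=' and '<' comparison operators above; Python callers should prefer the
## operators, matching the deprecation of IsEqual() in the C++ header.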
def register_Ns3Ipv4Mask_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor]
cls.add_constructor([param('uint32_t', 'mask')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor]
cls.add_constructor([param('char const *', 'mask')])
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function]
cls.add_method('GetInverse',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint16_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Mask', 'other')],
is_const=True, deprecated=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'mask')])
return
def register_Ns3Ipv6Address_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
cls.add_constructor([param('uint8_t *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) const [member function]
cls.add_method('CombinePrefix',
'ns3::Ipv6Address',
[param('ns3::Ipv6Prefix const &', 'prefix')],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv6Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv6Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
cls.add_method('GetAllHostsMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
cls.add_method('GetAllNodesMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
cls.add_method('GetAllRoutersMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function]
cls.add_method('GetIpv4MappedAddress',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::HasPrefix(ns3::Ipv6Prefix const & prefix) const [member function]
cls.add_method('HasPrefix',
'bool',
[param('ns3::Ipv6Prefix const &', 'prefix')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
cls.add_method('IsAllHostsMulticast',
'bool',
[],
is_const=True, deprecated=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
cls.add_method('IsAllNodesMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
cls.add_method('IsAllRoutersMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
cls.add_method('IsAny',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsDocumentation() const [member function]
cls.add_method('IsDocumentation',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Address const &', 'other')],
is_const=True, deprecated=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() const [member function]
cls.add_method('IsIpv4MappedAddress',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
cls.add_method('IsLinkLocal',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function]
cls.add_method('IsLinkLocalMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
cls.add_method('IsLocalhost',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
cls.add_method('IsSolicitedMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac16Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac64Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac8Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac8Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac16Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac16Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac64Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac64Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac8Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac8Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function]
cls.add_method('MakeIpv4MappedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv4Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
cls.add_method('MakeSolicitedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv6Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
cls.add_method('Set',
'void',
[param('uint8_t *', 'address')])
return
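# Usage sketch: the Ipv6Address methods registered above become callable from
# Python. This assumes the bindings are built and imported as ns.network, as in
# a typical pybindgen-based ns-3 tree; adjust the import to the local build.
#
#   import ns.network
#   addr = ns.network.Ipv6Address("2001:db8::1")
#   addr.IsMulticast()       # bool, per the IsMulticast binding above
#   addr.IsLinkLocal()       # bool
#   mapped = ns.network.Ipv6Address.MakeIpv4MappedAddress(ns.network.Ipv4Address("10.0.0.1"))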
def register_Ns3Ipv6Prefix_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
cls.add_constructor([param('uint8_t *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
cls.add_constructor([param('char const *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix, uint8_t prefixLength) [constructor]
cls.add_constructor([param('uint8_t *', 'prefix'), param('uint8_t', 'prefixLength')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix, uint8_t prefixLength) [constructor]
cls.add_constructor([param('char const *', 'prefix'), param('uint8_t', 'prefixLength')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
cls.add_constructor([param('uint8_t', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetMinimumPrefixLength() const [member function]
cls.add_method('GetMinimumPrefixLength',
'uint8_t',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint8_t',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Prefix const &', 'other')],
is_const=True, deprecated=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::SetPrefixLength(uint8_t prefixLength) [member function]
cls.add_method('SetPrefixLength',
'void',
[param('uint8_t', 'prefixLength')])
return
def register_Ns3Mac48Address_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
cls.add_output_stream_operator()
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor]
cls.add_constructor([param('char const *', 'str')])
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function]
cls.add_method('Allocate',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Mac48Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function]
cls.add_method('CopyFrom',
'void',
[param('uint8_t const *', 'buffer')])
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'void',
[param('uint8_t *', 'buffer')],
is_const=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv4Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv6Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function]
cls.add_method('GetMulticast6Prefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function]
cls.add_method('GetMulticastPrefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function]
cls.add_method('IsGroup',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
return
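# Usage sketch: the Mac48Address bindings above, again assuming an ns.network
# import from a pybindgen-based ns-3 build.
#
#   import ns.network
#   mac = ns.network.Mac48Address("00:00:00:00:00:01")
#   fresh = ns.network.Mac48Address.Allocate()   # static allocator registered above
#   mac.IsBroadcast()                            # False for this address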
def register_Ns3Mac8Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
## mac8-address.h (module 'network'): ns3::Mac8Address::Mac8Address(ns3::Mac8Address const & arg0) [constructor]
cls.add_constructor([param('ns3::Mac8Address const &', 'arg0')])
## mac8-address.h (module 'network'): ns3::Mac8Address::Mac8Address() [constructor]
cls.add_constructor([])
## mac8-address.h (module 'network'): ns3::Mac8Address::Mac8Address(uint8_t addr) [constructor]
cls.add_constructor([param('uint8_t', 'addr')])
## mac8-address.h (module 'network'): static ns3::Mac8Address ns3::Mac8Address::Allocate() [member function]
cls.add_method('Allocate',
'ns3::Mac8Address',
[],
is_static=True)
## mac8-address.h (module 'network'): static ns3::Mac8Address ns3::Mac8Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Mac8Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## mac8-address.h (module 'network'): void ns3::Mac8Address::CopyFrom(uint8_t const * pBuffer) [member function]
cls.add_method('CopyFrom',
'void',
[param('uint8_t const *', 'pBuffer')])
## mac8-address.h (module 'network'): void ns3::Mac8Address::CopyTo(uint8_t * pBuffer) const [member function]
cls.add_method('CopyTo',
'void',
[param('uint8_t *', 'pBuffer')],
is_const=True)
## mac8-address.h (module 'network'): static ns3::Mac8Address ns3::Mac8Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Mac8Address',
[],
is_static=True)
## mac8-address.h (module 'network'): static bool ns3::Mac8Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
return
def register_Ns3NetDeviceContainer_methods(root_module, cls):
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & arg0) [constructor]
cls.add_constructor([param('ns3::NetDeviceContainer const &', 'arg0')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer() [constructor]
cls.add_constructor([])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::Ptr<ns3::NetDevice> dev) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::NetDevice >', 'dev')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(std::string devName) [constructor]
cls.add_constructor([param('std::string', 'devName')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & a, ns3::NetDeviceContainer const & b) [constructor]
cls.add_constructor([param('ns3::NetDeviceContainer const &', 'a'), param('ns3::NetDeviceContainer const &', 'b')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::NetDeviceContainer other) [member function]
cls.add_method('Add',
'void',
[param('ns3::NetDeviceContainer', 'other')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'device')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(std::string deviceName) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'deviceName')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::Iterator ns3::NetDeviceContainer::Begin() const [member function]
cls.add_method('Begin',
'ns3::NetDeviceContainer::Iterator',
[],
is_const=True)
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::Iterator ns3::NetDeviceContainer::End() const [member function]
cls.add_method('End',
'ns3::NetDeviceContainer::Iterator',
[],
is_const=True)
## net-device-container.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::NetDeviceContainer::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'i')],
is_const=True)
## net-device-container.h (module 'network'): uint32_t ns3::NetDeviceContainer::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
return
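# Usage sketch: NetDeviceContainer as registered above, assuming ns.network is
# importable; 'other' stands for some previously built container (hypothetical).
#
#   import ns.network
#   devices = ns.network.NetDeviceContainer()
#   devices.Add(other)                # merge another container
#   for i in range(devices.GetN()):   # GetN/Get registered above
#       dev = devices.Get(i)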
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
is_virtual=True, visibility='protected')
return
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3ObjectFactory_methods(root_module, cls):
cls.add_output_stream_operator()
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
cls.add_constructor([param('std::string', 'typeId')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::Object >',
[],
is_const=True)
## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
## object-factory.h (module 'core'): bool ns3::ObjectFactory::IsTypeIdSet() const [member function]
cls.add_method('IsTypeIdSet',
'bool',
[],
is_const=True)
## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('ns3::TypeId', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('char const *', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('std::string', 'tid')])
return
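# Usage sketch: ObjectFactory as registered above, assuming ns.core is
# importable; "ns3::Node" is only an illustrative TypeId name.
#
#   import ns.core
#   factory = ns.core.ObjectFactory()
#   factory.SetTypeId("ns3::Node")
#   obj = factory.Create()            # returns an ns3::Ptr<ns3::Object>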
def register_Ns3PacketMetadata_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [constructor]
cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[param('ns3::Buffer', 'buffer')],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
cls.add_method('CreateFragment',
'ns3::PacketMetadata',
[param('uint32_t', 'start'), param('uint32_t', 'end')],
is_const=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
cls.add_method('Enable',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('RemoveHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('RemoveTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3PacketMetadataItem_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
cls.add_constructor([])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
cls.add_instance_attribute('isFragment', 'bool', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::type [variable]
cls.add_instance_attribute('type', 'ns3::PacketMetadata::Item::ItemType', is_const=False)
return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketMetadata::Item',
[])
return
def register_Ns3PacketTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketTagIterator::Item',
[])
return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3PacketTagList_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [constructor]
cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
cls.add_method('Add',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
cls.add_method('Head',
'ns3::PacketTagList::TagData const *',
[],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
cls.add_method('Peek',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
cls.add_method('Remove',
'bool',
[param('ns3::Tag &', 'tag')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Replace(ns3::Tag & tag) [member function]
cls.add_method('Replace',
'bool',
[param('ns3::Tag &', 'tag')])
return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
cls.add_instance_attribute('count', 'uint32_t', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
cls.add_instance_attribute('data', 'uint8_t [ 1 ]', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::size [variable]
cls.add_instance_attribute('size', 'uint32_t', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
return
def register_Ns3Tag_methods(root_module, cls):
## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
cls.add_constructor([])
## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [constructor]
cls.add_constructor([param('ns3::Tag const &', 'arg0')])
## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_virtual=True, is_pure_virtual=True)
## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True, is_pure_virtual=True)
## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_const=True, is_virtual=True, is_pure_virtual=True)
return
def register_Ns3TagBuffer_methods(root_module, cls):
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [constructor]
cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
cls.add_method('CopyFrom',
'void',
[param('ns3::TagBuffer', 'o')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
cls.add_method('ReadDouble',
'double',
[])
## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
cls.add_method('TrimAtEnd',
'void',
[param('uint32_t', 'trim')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
cls.add_method('WriteDouble',
'void',
[param('double', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t v) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t v) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'v')])
return
def register_Ns3TimeWithUnit_methods(root_module, cls):
cls.add_output_stream_operator()
## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [constructor]
cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor]
cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')])
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<const ns3::AttributeAccessor> accessor, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<const ns3::AttributeAccessor> accessor, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<const ns3::TraceSourceAccessor> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')],
deprecated=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<const ns3::TraceSourceAccessor> accessor, std::string callback, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(std::size_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('std::size_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(std::size_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('std::size_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId::hash_t ns3::TypeId::GetHash() const [member function]
cls.add_method('GetHash',
'ns3::TypeId::hash_t',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint16_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint16_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint16_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint16_t',
[],
is_static=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
cls.add_method('GetSize',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(std::size_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('std::size_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(ns3::TypeId::hash_t hash) [member function]
cls.add_method('LookupByHash',
'ns3::TypeId',
[param('uint32_t', 'hash')],
is_static=True)
## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(ns3::TypeId::hash_t hash, ns3::TypeId * tid) [member function]
cls.add_method('LookupByHashFailSafe',
'bool',
[param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
is_static=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<const ns3::TraceSourceAccessor> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): ns3::Ptr<const ns3::TraceSourceAccessor> ns3::TypeId::LookupTraceSourceByName(std::string name, ns3::TypeId::TraceSourceInformation * info) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name'), param('ns3::TypeId::TraceSourceInformation *', 'info')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(std::size_t i, ns3::Ptr<const ns3::AttributeValue> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('std::size_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
cls.add_method('SetSize',
'ns3::TypeId',
[param('std::size_t', 'size')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t uid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'uid')])
return
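# Usage sketch: TypeId lookups as registered above, assuming ns.core is
# importable and "ns3::Object" is registered in this build (illustrative only;
# LookupByName aborts if the name is unknown, LookupByHashFailSafe does not).
#
#   import ns.core
#   tid = ns.core.TypeId.LookupByName("ns3::Object")
#   tid.GetName()          # "ns3::Object"
#   tid.GetAttributeN()    # number of attributes registered on the type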
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportLevel [variable]
cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportMsg [variable]
cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable]
cls.add_instance_attribute('callback', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportLevel [variable]
cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportMsg [variable]
cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3Int64x64_t_methods(root_module, cls):
cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::int64x64_t'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('>=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right'))
cls.add_unary_numeric_operator('-')
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
cls.add_constructor([])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(double const value) [constructor]
cls.add_constructor([param('double const', 'value')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long double const value) [constructor]
cls.add_constructor([param('long double const', 'value')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(int const v) [constructor]
cls.add_constructor([param('int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long int const v) [constructor]
cls.add_constructor([param('long int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int const v) [constructor]
cls.add_constructor([param('long long int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int const v) [constructor]
cls.add_constructor([param('unsigned int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int const v) [constructor]
cls.add_constructor([param('long unsigned int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int const v) [constructor]
cls.add_constructor([param('long long unsigned int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t const hi, uint64_t const lo) [constructor]
cls.add_constructor([param('int64_t const', 'hi'), param('uint64_t const', 'lo')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
## int64x64-128.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## int64x64-128.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
cls.add_method('GetHigh',
'int64_t',
[],
is_const=True)
## int64x64-128.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
cls.add_method('GetLow',
'uint64_t',
[],
is_const=True)
## int64x64-128.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t const v) [member function]
cls.add_method('Invert',
'ns3::int64x64_t',
[param('uint64_t const', 'v')],
is_static=True)
## int64x64-128.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
cls.add_method('MulByInvert',
'void',
[param('ns3::int64x64_t const &', 'o')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::implementation [variable]
cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True)
return
def register_Ns3Chunk_methods(root_module, cls):
## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
cls.add_constructor([])
## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [constructor]
cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True, is_pure_virtual=True)
## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')],
is_virtual=True)
## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True, is_pure_virtual=True)
return
def register_Ns3Header_methods(root_module, cls):
cls.add_output_stream_operator()
## header.h (module 'network'): ns3::Header::Header() [constructor]
cls.add_constructor([])
## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [constructor]
cls.add_constructor([param('ns3::Header const &', 'arg0')])
## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True, is_pure_virtual=True)
## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True, is_pure_virtual=True)
## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True, is_pure_virtual=True)
return
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Initialize() [member function]
cls.add_method('Initialize',
'void',
[])
## object.h (module 'core'): bool ns3::Object::IsInitialized() const [member function]
cls.add_method('IsInitialized',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True, visibility='protected')
## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
is_virtual=True, visibility='protected')
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
is_virtual=True, visibility='protected')
return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<const ns3::Object> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
return
def register_Ns3Time_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('>=')
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right'))
cls.add_output_stream_operator()
## nstime.h (module 'core'): ns3::Time::Time() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [constructor]
cls.add_constructor([param('ns3::Time const &', 'o')])
## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
cls.add_constructor([param('std::string const &', 's')])
## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit) const [member function]
cls.add_method('As',
'ns3::TimeWithUnit',
[param('ns3::Time::Unit const', 'unit')],
is_const=True)
## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
cls.add_method('Compare',
'int',
[param('ns3::Time const &', 'o')],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function]
cls.add_method('FromDouble',
'ns3::Time',
[param('double', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function]
cls.add_method('FromInteger',
'ns3::Time',
[param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function]
cls.add_method('GetDays',
'double',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
cls.add_method('GetFemtoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function]
cls.add_method('GetHours',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
cls.add_method('GetInteger',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
cls.add_method('GetMicroSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
cls.add_method('GetMilliSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function]
cls.add_method('GetMinutes',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
cls.add_method('GetNanoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
cls.add_method('GetPicoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
cls.add_method('GetResolution',
'ns3::Time::Unit',
[],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
cls.add_method('GetSeconds',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
cls.add_method('GetTimeStep',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function]
cls.add_method('GetYears',
'double',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
cls.add_method('IsNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
cls.add_method('IsPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
cls.add_method('IsStrictlyNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
cls.add_method('IsStrictlyPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
cls.add_method('IsZero',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function]
cls.add_method('Max',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function]
cls.add_method('Min',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
cls.add_method('SetResolution',
'void',
[param('ns3::Time::Unit', 'resolution')],
is_static=True)
## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function]
cls.add_method('StaticInit',
'bool',
[],
is_static=True)
## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function]
cls.add_method('To',
'ns3::int64x64_t',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function]
cls.add_method('ToDouble',
'double',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function]
cls.add_method('ToInteger',
'int64_t',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_const=True, is_virtual=True, is_pure_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_const=True, is_virtual=True, is_pure_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_const=True, is_virtual=True, is_pure_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_const=True, is_virtual=True, is_pure_virtual=True)
return
def register_Ns3Trailer_methods(root_module, cls):
cls.add_output_stream_operator()
## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor]
cls.add_constructor([])
## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [constructor]
cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'end')],
is_virtual=True, is_pure_virtual=True)
## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')],
is_virtual=True)
## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True, is_pure_virtual=True)
## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True, is_pure_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_const=True, is_virtual=True, is_pure_virtual=True)
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=C0302,C0103,R0902,R0904,R0913,W0212,W0621,R0912,R0921,R0914,W0403
################################################################################
#
# Controlling class
#
# == Data download and storage ==
# - Local data storage (local-mode)
# - Download from internet to cache (download-mode)
#
################################################################################
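# A minimal, illustrative sketch of the download-mode flow (experiment number, scan numbers
# and the configuration step below are assumptions for illustration, not a prescribed recipe):
#
#   controller = CWSCDReductionControl('HB3A')
#   # ... configure the experiment number and local data directory on the controller ...
#   status, message = controller.download_data_set(scan_list=[1, 2, 3], overwrite=False)
#   status, pt_list = controller.get_pt_numbers(exp_no=123, scan_no=1)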
from __future__ import (absolute_import, division, print_function)
import bisect
try:
# python3
from urllib.request import urlopen
from urllib.error import HTTPError
from urllib.error import URLError
except ImportError:
from urllib2 import urlopen
from urllib2 import HTTPError
from urllib2 import URLError
from six.moves import range
import math
import csv
import random
import os
import numpy
from HFIR_4Circle_Reduction.fourcircle_utility import *
import HFIR_4Circle_Reduction.fourcircle_utility as fourcircle_utility
from HFIR_4Circle_Reduction.peakprocesshelper import PeakProcessRecord
from HFIR_4Circle_Reduction.peakprocesshelper import SinglePointPeakIntegration
from HFIR_4Circle_Reduction.peakprocesshelper import SinglePtScansIntegrationOperation
from HFIR_4Circle_Reduction import fputility
from HFIR_4Circle_Reduction import project_manager
from HFIR_4Circle_Reduction import peak_integration_utility
from HFIR_4Circle_Reduction import absorption
from HFIR_4Circle_Reduction import process_mask
import mantid
import mantid.simpleapi as mantidsimple
from mantid.api import AnalysisDataService
from mantid.kernel import V3D
from numpy import *
DebugMode = True
# DET_X_SIZE = 512
# DET_Y_SIZE = 512
MAX_SCAN_NUMBER = 100000
def check_str_type(variable, var_name):
"""
:param variable:
:param var_name:
:return:
"""
assert isinstance(var_name, str), 'Variable name {0} must be an integer but not a {1}' \
''.format(var_name, type(var_name))
assert isinstance(variable, str), '{0} {1} must be an string but not a {2}' \
''.format(var_name, variable, type(variable))
return
def check_float_type(variable, var_name):
"""
check whether a variable is an integer
:except AssertionError:
:param variable:
:param var_name:
:return:
"""
assert isinstance(var_name, str), 'Variable name {0} must be an integer but not a {1}' \
''.format(var_name, type(var_name))
assert isinstance(variable, int) or isinstance(variable, float), '{0} {1} must be an integer but not a {2}' \
''.format(var_name, variable, type(variable))
return
def check_int_type(variable, var_name):
"""
check whether a variable is an integer
:except AssertionError:
:param variable:
:param var_name:
:return:
"""
assert isinstance(var_name, str), 'Variable name {0} must be a string but not a {1}' \
''.format(var_name, type(var_name))
assert isinstance(variable, int), '{0} {1} must be an integer but not a {2}' \
''.format(var_name, variable, type(variable))
return
class CWSCDReductionControl(object):
""" Controlling class for reactor-based single crystal diffraction reduction
"""
RESERVED_ROI_NAME = '__temp_roi__'
def __init__(self, instrument_name=None):
""" init
"""
if isinstance(instrument_name, str):
self._instrumentName = instrument_name
elif instrument_name is None:
self._instrumentName = ''
else:
raise RuntimeError('Instrument name %s of type %s is not allowed.' % (str(instrument_name),
str(type(instrument_name))))
# Experiment number, data storage
# No Use/Confusing: self._expNumber = 0
self._dataDir = None
self._workDir = '/tmp'
self._preprocessedDir = None
# dictionary for pre-processed scans. key = scan number, value = dictionary for all kinds of information
self._preprocessedInfoDict = None
self._myServerURL = ''
# Some set up
self._expNumber = None
# instrument default constants
self._defaultDetectorSampleDistance = None
# geometry of pixel
self._defaultPixelSizeX = None
self._defaultPixelSizeY = None
# user-defined wave length
self._userWavelengthDict = dict()
# default peak center
self._defaultDetectorCenter = None
# Container for MDEventWorkspace for each Pt.
self._myMDWsList = list()
# Container for loaded workspaces
self._mySpiceTableDict = {}
# Container for loaded raw pt workspace
self._myRawDataWSDict = dict()
self._myRawDataMasked = dict()
# Container for PeakWorkspaces for calculating UB matrix
# self._myUBPeakWSDict = dict()
# Container for UB matrix
self._myUBMatrixDict = dict()
# Peak Info
self._myPeakInfoDict = dict()
# Loaded peak information dictionary
self._myLoadedPeakInfoDict = dict()
# Sample log value look up table
self._2thetaLookupTable = dict()
# Last UB matrix calculated
self._myLastPeakUB = None
# Flag for data storage
self._cacheDataOnly = False
# Single PT scan integration:
# example: key = (exp_number, scan_number, pt_number, roi_name), value = dict keyed by integration direction
# each entry holds vec_x, vec_y, cost, params
self._single_pt_integration_dict = dict()
# workspace for a exp, scan_numbers, roi_name
self._single_pt_matrix_dict = dict()
# Dictionary to store survey information
self._scanSummaryList = list()
# Tuple to hold the result of refining UB
self._refinedUBTup = None
# Record for merged scans
self._mergedWSManager = list()
# About K-shift for output of integrated peak
self._kVectorIndex = 1
self._kShiftDict = dict()
# A dictionary to manage all loaded and processed MDEventWorkspaces
# self._expDataDict = {}
self._detSampleDistanceDict = dict()
self._detCenterDict = dict()
# detector geometry: initialized to unphysical value
self._detectorSize = [-1, -1]
self._defaultPixelNumberX = None
self._defaultPixelNumberY = None
# reference workspace for LoadMask
self._refWorkspaceForMask = None
# Region of interest: key = roi name, value = RegionOfInterest instance
self._roiDict = dict()
# single point peak integration related
self._two_theta_scan_dict = dict()
self._scan_2theta_set = set()
self._two_theta_sigma = None # a 2-tuple vector for (2theta, gaussian-sigma)
self._current_single_pt_integration_key = None
self._curr_2theta_fwhm_func = None
# register startup
mantid.UsageService.registerFeatureUsage("Interface", "4-Circle Reduction", False)
# debug mode
self._debugPrintMode = True
return
@property
def pre_processed_dir(self):
"""
get the pre-processed directory
:return:
"""
return self._preprocessedDir
@pre_processed_dir.setter
def pre_processed_dir(self, dir_name):
"""
setting pre-processed directory
:param dir_name:
:return:
"""
# check
assert isinstance(dir_name, str) or dir_name is None, 'Directory {0} must be None or a string.'.format(dir_name)
if dir_name is not None and os.path.exists(dir_name) is False:
raise RuntimeError('Pre-processed scans directory {0} does not exist!'.format(dir_name))
# set
self._preprocessedDir = dir_name
# load pre-processed scans' record file if possible
if self._expNumber is None:
raise RuntimeError('Experiment number must be set up before the pre-processed scan directory is set.')
record_file_name = fourcircle_utility.pre_processed_record_file(self._expNumber, self._preprocessedDir)
if os.path.exists(record_file_name):
self._preprocessedInfoDict = fourcircle_utility.read_pre_process_record(record_file_name)
return
def _add_merged_ws(self, exp_number, scan_number, pt_number_list):
""" Record a merged workspace to
Requirements: experiment number, scan number and pt numbers are valid
:param exp_number:
:param scan_number:
:param pt_number_list:
:return:
"""
assert isinstance(exp_number, int) and isinstance(scan_number, int)
assert isinstance(pt_number_list, list) and len(pt_number_list) > 0
if (exp_number, scan_number, pt_number_list) in self._mergedWSManager:
return 'Exp %d Scan %d Pt %s has already been merged and recorded.' % (exp_number,
scan_number,
str(pt_number_list))
self._mergedWSManager.append((exp_number, scan_number, pt_number_list))
self._mergedWSManager.sort()
return
@staticmethod
def generate_single_pt_scans_key(exp_number, scan_number_list, roi_name, integration_direction):
"""
generate a unique but repeatable key for multiple single-pt scans
:param exp_number:
:param scan_number_list:
:param roi_name:
:param integration_direction:
:return:
"""
# do some math to scan numbers
check_list('Scan numbers', scan_number_list)
scan_number_vec = numpy.array(scan_number_list)
unique = numpy.sum(scan_number_vec, axis=0)
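# Illustrative example (hypothetical numbers): exp_number=734, scan_number_list=[1, 2, 3],
# roi_name='roi_a', integration_direction='vertical' gives unique=6 and thus the key
# 'e734_s1-3:6_roi_a_vertical'.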
ws_key = 'e{}_s{}-{}:{}_{}_{}'.format(exp_number, scan_number_list[0], scan_number_list[-1], unique,
roi_name, integration_direction)
return ws_key
def add_k_shift_vector(self, k_x, k_y, k_z):
"""
Add a k-shift vector
:param k_x:
:param k_y:
:param k_z:
:return: k_index of the (k_x, k_y, k_z)
"""
# check
assert isinstance(k_x, float), 'Kx {0} must be a float but not a {1}'.format(k_x, type(k_x))
assert isinstance(k_y, float), 'Ky {0} must be a float but not a {1}'.format(k_y, type(k_y))
assert isinstance(k_z, float), 'Kz {0} must be a float but not a {1}'.format(k_z, type(k_z))
k_shift_vector = (k_x, k_y, k_z)
self._kShiftDict[self._kVectorIndex] = [k_shift_vector, []]
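# Usage note (illustrative): k-indices are handed out sequentially starting from 1, so the first
# call on a fresh controller, e.g. add_k_shift_vector(0.5, 0.0, 0.0), returns 1.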
# make progress
return_k_index = self._kVectorIndex
self._kVectorIndex += 1
return return_k_index
def apply_mask(self, exp_number, scan_number, pt_number, roi_name=None):
"""
Apply mask on a Pt./run. by using a standard non-tag-based mask workspace's name
Requirements:
1. exp number, scan number, and pt number are integers
2. mask workspace for this can must exist!
Guarantees:
the detector-xml data file is loaded to workspace2D with detectors being masked
:param exp_number:
:param scan_number:
:param pt_number:
:param roi_name: a string or a None
:return:
"""
# check
assert isinstance(exp_number, int), 'Exp number {0} must be an integer but not a {1}' \
''.format(exp_number, type(exp_number))
assert isinstance(scan_number, int), 'Scan number {0} must be an integer but not a {1}' \
''.format(scan_number, type(scan_number))
assert isinstance(pt_number, int), 'Pt number {0} must be an integer but not a {1}' \
''.format(pt_number, type(pt_number))
# get raw workspace for counts
raw_pt_ws_name = get_raw_data_workspace_name(exp_number, scan_number, pt_number)
# an existing mask
if roi_name not in self._roiDict:
raise RuntimeError('ROI {0} is not in mask workspace dictionary. Current keys are {1}'
''.format(roi_name, self._roiDict.keys()))
mask_ws_name = self._roiDict[roi_name].mask_workspace
if mask_ws_name is None:
raise RuntimeError('ROI {0} has no mask workspace set'.format(roi_name))
# mask detectors
mantidsimple.MaskDetectors(Workspace=raw_pt_ws_name, MaskedWorkspace=mask_ws_name)
# record
self._myRawDataMasked[(exp_number, scan_number, pt_number)] = roi_name
return
def check_2theta_fwhm_formula(self, formula):
"""
check whether a formula can be used to calculate FWHM from 2theta.
If it is a valid formula, set as a class variable
:param formula:
:return: 2-tuple
"""
assert isinstance(formula, str), '2theta-FWHM formula {} must be a string but not a {}' \
''.format(formula, type(formula))
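# Example of an accepted formula (assumed, for illustration only): the string '5.0 + 0.02*x'
# becomes fwhm_func = lambda x: 5.0 + 0.02*x, so fwhm_func(30.) evaluates to 5.6.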
try:
equation = 'lambda x: {}'.format(formula)
fwhm_func = eval(equation)
except SyntaxError as syn_err:
return False, 'Unable to accept 2theta-FWHM formula {} due to {}'.format(formula, syn_err)
self._curr_2theta_fwhm_func = fwhm_func
return True, None
def find_peak(self, exp_number, scan_number, pt_number_list=None):
""" Find 1 peak in sample Q space for UB matrix
:param exp_number:
:param scan_number:
:param pt_number_list:
:return: 2-tuple as (boolean, object), e.g. (False, error message) or (True, peak centre)
This part will be redone as in ticket 11847_Load_HB3A_Experiment
"""
# Check & set pt. numbers
check_int_type(exp_number, 'Experiment number')
check_int_type(scan_number, 'Scan Number')
if pt_number_list is None:
status, pt_number_list = self.get_pt_numbers(exp_number, scan_number)
assert status, 'Unable to get Pt numbers from scan %d.' % scan_number
assert isinstance(pt_number_list, list) and len(pt_number_list) > 0
# Check whether the MDEventWorkspace used to find peaks exists
if self.has_merged_data(exp_number, scan_number, pt_number_list):
pass
else:
raise RuntimeError('Data must be merged before')
# Find peak in Q-space
merged_ws_name = get_merged_md_name(self._instrumentName, exp_number, scan_number, pt_number_list)
peak_ws_name = get_peak_ws_name(exp_number, scan_number, pt_number_list)
mantidsimple.FindPeaksMD(InputWorkspace=merged_ws_name,
MaxPeaks=10,
PeakDistanceThreshold=5.,
DensityThresholdFactor=0.1,
OutputWorkspace=peak_ws_name)
assert AnalysisDataService.doesExist(peak_ws_name), 'PeaksWorkspace {0} does not exist in ADS.' \
''.format(peak_ws_name)
# add peak to UB matrix workspace to manager
self._set_peak_info(exp_number, scan_number, peak_ws_name, merged_ws_name)
# add the merged workspace to list to manage
self._add_merged_ws(exp_number, scan_number, pt_number_list)
peak_center = self._myPeakInfoDict[(exp_number, scan_number)].get_peak_centre()
return True, peak_center
@staticmethod
def find_detector_size(exp_directory, exp_number):
"""
find detector size from experiment directory
:param exp_directory:
:param exp_number
:return:
"""
# guess the file name
first_xm_file = os.path.join(exp_directory, 'HB3A_Exp{0}_Scan0001_00001.xml'.format(exp_number))
if os.path.exists(first_xm_file):
file_size = os.path.getsize(first_xm_file)
if file_size < 136132 * 2:
det_size = 256, 256
elif file_size < 529887 * 2:
det_size = 512, 512
else:
raise RuntimeError('File size {0} is not supported (too large for a known detector size).'.format(file_size))
return True, det_size
return False, 'Unable to find first Pt file {0}'.format(first_xm_file)
def calculate_intensity_single_pt(self, exp_number, scan_number, pt_number, roi_name, ref_fwhm, is_fwhm):
"""
calculate single-point-measurement peak/scan's intensity
:param exp_number:
:param scan_number:
:param pt_number:
:param roi_name:
:param ref_fwhm:
:param is_fwhm:
:return:
"""
# check inputs
assert isinstance(exp_number, int), 'Experiment number {0} must be an integer but not a {1}' \
''.format(exp_number, type(exp_number))
assert isinstance(scan_number, int), 'Scan number {0} must be an integer.'.format(scan_number)
assert isinstance(pt_number, int), 'Pt number {0} must be an integer'.format(pt_number)
assert isinstance(roi_name, str), 'ROI name {0} must be a string'.format(roi_name)
check_float_type(ref_fwhm, 'Reference FWHM')
# check whether the detector counts has been calculated and get the value
if (exp_number, scan_number, pt_number, roi_name) not in self._single_pt_integration_dict:
raise RuntimeError('Exp {0} Scan {1} Pt {2} ROI {3} does not exist in single-point integration '
'dictionary, whose keys are {4}'.format(exp_number, scan_number, pt_number, roi_name,
self._single_pt_integration_dict.keys()))
integration_record = self._single_pt_integration_dict[exp_number, scan_number, pt_number, roi_name]
# integration_record.set_ref_peak_width(ref_fwhm, is_fwhm)
# params = integration_record
#
# # get 2theta value from
# two_theta = self.get_sample_log_value(exp_number, scan_number, pt_number, '2theta')
# ref_exp_number, ref_scan_number, integrated_peak_params = self.get_integrated_scan_params(exp_number,
# two_theta,
# resolution=0.01)
peak_intensity = peak_integration_utility.calculate_single_pt_scan_peak_intensity(
integration_record.get_pt_intensity(), ref_fwhm, is_fwhm)
integration_record.set_peak_intensity(peak_intensity)
return peak_intensity
def get_single_scan_pt_model(self, exp_number, scan_number, pt_number, roi_name, integration_direction):
""" get a single-pt scan summed 1D data either vertically or horizontally with model data
:param exp_number:
:param scan_number:
:param pt_number:
:param roi_name:
:param integration_direction:
:return: 2-tuple: vector X, vector of model values
"""
# get record key
ws_record_key = self._current_single_pt_integration_key
print('[DB...BAT] Retrieve ws record key: {}'.format(ws_record_key))
# TODO - 20180814 - Check pt number, roi name and integration direction
if ws_record_key in self._single_pt_matrix_dict:
# check integration manager
integration_manager = self._single_pt_matrix_dict[ws_record_key]
assert integration_manager.exp_number == exp_number, 'Experiment number {0} does not match the one ({1}) of ' \
'the cached integration manager'.format(exp_number, integration_manager.exp_number)
else:
raise RuntimeError('Last single-pt integration manager (key) {} does not exist.'
.format(ws_record_key))
matrix_ws = AnalysisDataService.retrieve(integration_manager.get_model_workspace())
ws_index = integration_manager.get_spectrum_number(scan_number, from_zero=True)
vec_x = matrix_ws.readX(ws_index)
vec_model = matrix_ws.readY(ws_index)
return vec_x, vec_model
def get_single_scan_pt_summed(self, exp_number, scan_number, pt_number, roi_name, integration_direction):
""" get a single scan Pt. 's on-detector in-roi integration result
:param exp_number:
:param scan_number:
:param pt_number:
:param roi_name:
:param integration_direction:
:return: 2-tuple: vector X, vector Y (raw counts summed over the ROI along the integration direction)
"""
integration_record = self.get_single_pt_info(exp_number, scan_number, pt_number, roi_name,
integration_direction)
vec_x, vec_y = integration_record.get_vec_x_y()
return vec_x, vec_y
def get_single_pt_info(self, exp_number, scan_number, pt_number, roi_name, integration_direction):
""" get the integrated single-pt scan data
:param exp_number:
:param scan_number:
:param pt_number:
:param roi_name:
:return:
"""
try:
peak_info = self._single_pt_integration_dict[exp_number, scan_number, pt_number, roi_name]
except KeyError:
err_message = 'Exp {0} Scan {1} Pt {2} ROI {3} does not exit in Single-Pt-Integration dictionary ' \
'which has keys: {4}'.format(exp_number, scan_number, pt_number, roi_name,
self._single_pt_integration_dict.keys())
raise RuntimeError(err_message)
try:
peak_info = peak_info[integration_direction]
except KeyError:
err_message = 'Exp {0} Scan {1} Pt {2} ROI {3} does not have integration direction {4}' \
'in Single-Pt-Integration dictionary which has keys: {5}' \
''.format(exp_number, scan_number, pt_number, roi_name, integration_direction,
sorted(peak_info.keys()))
raise RuntimeError(err_message)
return peak_info
def calculate_peak_integration_sigma(self, two_theta):
"""
calculate Gaussian-Sigma for single-measurement peak integration by linear interpolation
:param two_theta:
:return: float
"""
if self._two_theta_sigma is None:
raise RuntimeError('2-theta Gaussian-sigma curve has not been set')
# do a linear interpolation
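# Illustrative example (made-up calibration): with self._two_theta_sigma = ([10., 20., 30.], [0.2, 0.3, 0.5]),
# a query at two_theta = 25. returns 0.4, half way between the 20-degree and 30-degree entries.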
interp_sigma = numpy.interp(two_theta, self._two_theta_sigma[0], self._two_theta_sigma[1])
print ('[DB...BAT] 2theta = {0}: output sigma = {1}'.format(two_theta, interp_sigma))
print ('[DB...BAT] X = {0}, Y = {1}'.format(self._two_theta_sigma[0], self._two_theta_sigma[1]))
return interp_sigma
def calculate_ub_matrix(self, peak_info_list, a, b, c, alpha, beta, gamma):
"""
Calculate UB matrix
Requirements: two or more than 2 peaks (PeakInfo) are specified
Set Miller index from raw data in Workspace2D.
:param peak_info_list:
:param a:
:param b:
:param c:
:param alpha:
:param beta:
:param gamma:
:return:
"""
# Check
assert isinstance(peak_info_list, list)
num_peak_info = len(peak_info_list)
if num_peak_info < 2:
return False, 'Too few peaks are input to calculate UB matrix. Must be >= 2.'
for peak_info in peak_info_list:
if isinstance(peak_info, PeakProcessRecord) is False:
raise NotImplementedError('Input PeakList is of type %s.' % str(type(peak_info_list[0])))
assert isinstance(peak_info, PeakProcessRecord)
# Construct a new peak workspace by combining all single peak
ub_peak_ws_name = 'Temp_UB_Peak'
self._build_peaks_workspace(peak_info_list, ub_peak_ws_name)
# index_from_spice=True, hkl_to_int=True)
# Calculate UB matrix
try:
mantidsimple.CalculateUMatrix(PeaksWorkspace=ub_peak_ws_name,
a=a, b=b, c=c, alpha=alpha, beta=beta, gamma=gamma)
except ValueError as val_err:
return False, str(val_err)
ub_peak_ws = AnalysisDataService.retrieve(ub_peak_ws_name)
ub_matrix = ub_peak_ws.sample().getOrientedLattice().getUB()
self._myLastPeakUB = ub_peak_ws
return True, ub_matrix
def does_raw_loaded(self, exp_no, scan_no, pt_no, roi_name):
"""
Check whether the raw Workspace2D for a Pt. exists
:param exp_no:
:param scan_no:
:param pt_no:
:param roi_name:
:return:
"""
# check input
check_int_type(exp_no, 'Experiment number')
check_int_type(scan_no, 'Scan number')
check_int_type(pt_no, 'Pt number')
loaded = (exp_no, scan_no, pt_no) in self._myRawDataWSDict
if loaded:
curr_roi = self._myRawDataMasked[(exp_no, scan_no, pt_no)]
if roi_name != curr_roi:
loaded = False
return loaded
def does_spice_loaded(self, exp_no, scan_no):
""" Check whether a SPICE file has been loaded
:param exp_no:
:param scan_no:
:return:
"""
return (exp_no, scan_no) in self._mySpiceTableDict
def download_spice_file(self, exp_number, scan_number, over_write):
"""
Download a scan/pt data from internet
:param exp_number: experiment number
:param scan_number:
:param over_write:
:return: 2-tuple: status (successful or failed), string (file name or error message
"""
# Check
if exp_number is None:
exp_number = self._expNumber
assert isinstance(exp_number, int)
assert isinstance(scan_number, int)
# Generate the URL for SPICE data file
file_url = get_spice_file_url(self._myServerURL, self._instrumentName, exp_number, scan_number)
file_name = get_spice_file_name(self._instrumentName, exp_number, scan_number)
file_name = os.path.join(self._dataDir, file_name)
if os.path.exists(file_name) is True and over_write is False:
return True, file_name
# Download
try:
mantidsimple.DownloadFile(Address=file_url, Filename=file_name)
except RuntimeError as run_err:
return False, str(run_err)
# Check file exist?
if os.path.exists(file_name) is False:
return False, "Unable to locate downloaded file %s." % file_name
return True, file_name
def download_spice_xml_file(self, scan_no, pt_no, exp_no=None, overwrite=False):
""" Download a SPICE XML file for one measurement in a scan
:param scan_no:
:param pt_no:
:param exp_no:
:param overwrite:
:return: tuple (boolean, local file name/error message)
"""
# Experiment number
if exp_no is None:
exp_no = self._expNumber
# Form the target file name and path
det_xml_file_name = get_det_xml_file_name(self._instrumentName, exp_no, scan_no, pt_no)
local_xml_file_name = os.path.join(self._dataDir, det_xml_file_name)
if os.path.exists(local_xml_file_name) is True and overwrite is False:
return True, local_xml_file_name
# Generate the URL for XML file
det_file_url = get_det_xml_file_url(self._myServerURL, self._instrumentName, exp_no, scan_no, pt_no)
# Download
try:
mantidsimple.DownloadFile(Address=det_file_url,
Filename=local_xml_file_name)
except RuntimeError as run_err:
return False, 'Unable to download Detector XML file %s from %s ' \
'due to %s.' % (local_xml_file_name, det_file_url, str(run_err))
# Check file exist?
if os.path.exists(local_xml_file_name) is False:
return False, "Unable to locate downloaded file %s." % local_xml_file_name
# NEXT ISSUE - This is a temporary fix for unsupported strings in XML
os.system("sed -i -e 's/0<x<1/0 x 1/g' %s" % local_xml_file_name)
return True, local_xml_file_name
def download_data_set(self, scan_list, overwrite=False):
"""
Download data set including (1) spice file for a scan and (2) XML files for measurements
:param scan_list:
:return:
"""
# Check
if self._expNumber is None:
raise RuntimeError('Experiment number is not set up for controller.')
error_message = ''
for scan_no in scan_list:
# Download single spice file for a run
status, ret_obj = self.download_spice_file(exp_number=self._expNumber,
scan_number=scan_no,
over_write=overwrite)
# Reject if SPICE file cannot download
if status is False:
error_message += '%s\n' % ret_obj
continue
# Load SPICE file to Mantid
spice_file_name = ret_obj
status, ret_obj = self.load_spice_scan_file(self._expNumber, scan_no, spice_file_name)
if status is False:
error_message = ret_obj
return False, error_message
else:
# spice_table = self._mySpiceTableDict[(self._expNumber, scan_no)]
spice_table = self._get_spice_workspace(self._expNumber, scan_no)
assert spice_table
pt_no_list = self._get_pt_list_from_spice_table(spice_table)
# Download all single-measurement file
for pt_no in pt_no_list:
status, ret_obj = self.download_spice_xml_file(scan_no, pt_no, overwrite=overwrite)
if status is False:
error_message += '%s\n' % ret_obj
# END-FOR
# END-FOR (scan_no)
return True, error_message
def check_generate_mask_workspace(self, exp_number, scan_number, mask_tag, check_throw):
"""
Check whether a MaskWorkspace exists according to the tag
If it does not, then generate one according to the tag
A MaskWorkspace's name is exactly the same as the tag of the mask specified by user in
reduction GUI.
:param exp_number: must be integer if not retrieve mask workspace
:param scan_number: must be integer if not retrieve mask workspace
:param mask_tag: string as the tag of the mask.
:param check_throw
:return:
"""
# Check
assert isinstance(mask_tag, str), 'Mask tag {0} ({1}) must be a string.'.format(mask_tag, type(mask_tag))
# MaskWorkspace's name is same as mask's tag
mask_ws_name = mask_tag
if AnalysisDataService.doesExist(mask_ws_name) is False:
# if the workspace does not exist, create a new mask workspace
if exp_number is None:
raise RuntimeError('Experiment number is not given with assumption that mask tag {0} shall '
'be a workspace.'.format(mask_tag))
# check for experiment and scan number
assert isinstance(exp_number, int), 'Experiment number {0} must be an integer but not a {1}.' \
''.format(exp_number, type(exp_number))
assert isinstance(scan_number, int), 'Scan number {0} ({1}) must be an integer.' \
''.format(scan_number, type(scan_number))
if mask_tag not in self._roiDict:
raise RuntimeError('Mask tag |{0}| does not exist in ROI dictionary.'.format(mask_tag))
region_of_interest = self._roiDict[mask_tag]
ll = region_of_interest[0]
ur = region_of_interest[1]
self.generate_mask_workspace(exp_number, scan_number, ll, ur, mask_ws_name)
if check_throw:
assert AnalysisDataService.doesExist(mask_ws_name), 'MaskWorkspace {0} does not exist.'.format(mask_ws_name)
return mask_ws_name
def does_spice_files_exist(self, exp_number, scan_number, pt_number=None):
"""
Check whether the data file for a scan or pt number exists in the local data directory
:param exp_number: experiment number or None (default to current experiment number)
:param scan_number:
:param pt_number: if None, check SPICE file; otherwise, detector xml file
:return: boolean (exist?) and string (file name)
"""
# check inputs
assert isinstance(exp_number, int) or exp_number is None
assert isinstance(scan_number, int)
assert isinstance(pt_number, int) or pt_number is None
# deal with default experiment number
if exp_number is None:
exp_number = self._expNumber
# 2 cases
if pt_number is None:
# no pt number, then check SPICE file
spice_file_name = get_spice_file_name(self._instrumentName, exp_number, scan_number)
try:
file_name = os.path.join(self._dataDir, spice_file_name)
except AttributeError:
raise AttributeError('Unable to create SPICE file name from directory %s and file name %s.'
'' % (self._dataDir, spice_file_name))
else:
# pt number given, then check whether the XML file for Pt exists
xml_file_name = get_det_xml_file_name(self._instrumentName, exp_number, scan_number,
pt_number)
file_name = os.path.join(self._dataDir, xml_file_name)
# END-IF
return os.path.exists(file_name), file_name
@staticmethod
def estimate_background(pt_intensity_dict, bg_pt_list):
"""
Estimate background value by average the integrated counts of some Pt.
:param pt_intensity_dict:
:param bg_pt_list: list of Pt. that are used to calculate background
:return:
"""
# Check
assert isinstance(pt_intensity_dict, dict)
assert isinstance(bg_pt_list, list) and len(bg_pt_list) > 0
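# Worked example (hypothetical counts): pt_intensity_dict = {1: 10., 2: 14.} with bg_pt_list = [1, 2]
# gives bg_sum = 24. and thus avg_bg = 12.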
# Sum over all Pt.
bg_sum = 0.
for bg_pt in bg_pt_list:
assert bg_pt in pt_intensity_dict, 'Pt. %d is not calculated.' % bg_pt
bg_sum += pt_intensity_dict[bg_pt]
avg_bg = float(bg_sum) / len(bg_pt_list)
return avg_bg
def get_surveyed_scans(self):
"""
get list of scans that are surveyed
:return:
"""
scan_number_list = [info[1] for info in self._scanSummaryList]
return scan_number_list
def get_ub_matrix(self, exp_number):
""" Get UB matrix assigned to an experiment
:param exp_number:
:return:
"""
# check
assert isinstance(exp_number, int), 'Experiment number must be an integer but not %s.' % str(type(exp_number))
if exp_number not in self._myUBMatrixDict:
err_msg = 'Experiment number %d has no UB matrix set up. Here ' \
'are list of experiments that have UB matrix set up: %s.' \
'' % (exp_number, str(self._myUBMatrixDict.keys()))
raise KeyError(err_msg)
return self._myUBMatrixDict[exp_number]
def get_calibrated_wave_length(self, exp_number):
""" Get the user specified (i.e., calibrated) wave length for a specific experiment
:param exp_number:
:return:
"""
# check inputs
assert isinstance(exp_number, int), 'Experiment number {0} must be an integer but not a {1}' \
''.format(exp_number, type(exp_number))
if exp_number not in self._userWavelengthDict:
return None
return self._userWavelengthDict[exp_number]
def get_wave_length(self, exp_number, scan_number_list):
"""
Get the wavelength.
Exception: RuntimeError if there are more than 1 wavelength found with all given scan numbers
:param exp_number:
:param scan_number_list:
:return:
"""
# check whether there is use wave length
if exp_number in self._userWavelengthDict:
return self._userWavelengthDict[exp_number]
# get the SPICE workspace
wave_length_set = set()
# go through all the SPICE table workspace
for scan_number in scan_number_list:
spice_table_name = get_spice_table_name(exp_number, scan_number)
curr_wl = get_wave_length(spice_table_name)
wave_length_set.add(curr_wl)
# END-FOR
if len(wave_length_set) > 1:
raise RuntimeError('There are more than 1 (%s) wave length found in scans.' % str(wave_length_set))
return wave_length_set.pop()
@staticmethod
def get_motor_step(exp_number, scan_number):
""" For omega/phi scan, get the average step of the motor
:param exp_number:
:param scan_number:
:return:
"""
# check
assert isinstance(exp_number, int), 'Experiment number {0} must be an integer but not a {1}.' \
''.format(exp_number, type(exp_number))
assert isinstance(scan_number, int), 'Scan number {0} must be an integer but not a {1}.' \
''.format(scan_number, type(scan_number))
# get SPICE table
spice_table_name = get_spice_table_name(exp_number, scan_number)
spice_table = AnalysisDataService.retrieve(spice_table_name)
if spice_table.rowCount() == 0:
raise RuntimeError('Spice table %s is empty.' % spice_table_name)
elif spice_table.rowCount() == 1:
raise RuntimeError('Only 1 row in Spice table %s. All motors are stationary.' % spice_table_name)
# get the motors values
omega_vec = get_log_data(spice_table, 'omega')
omega_dev, omega_step, omega_step_dev = get_step_motor_parameters(omega_vec)
omega_tup = omega_dev, ('omega', omega_step, omega_step_dev)
chi_vec = get_log_data(spice_table, 'chi')
chi_dev, chi_step, chi_step_dev = get_step_motor_parameters(chi_vec)
chi_tup = chi_dev, ('chi', chi_step, chi_step_dev)
phi_vec = get_log_data(spice_table, 'phi')
phi_dev, phi_step, phi_step_dev = get_step_motor_parameters(phi_vec)
phi_tup = phi_dev, ('phi', phi_step, phi_step_dev)
# find the one that moves
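# max() compares the deviation element of each tuple first, so the motor with the largest spread
# of values wins; e.g. (hypothetical) omega_dev = 2.0 beats chi_dev = 0.001 and phi_dev = 0.0.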
move_tup = max([omega_tup, chi_tup, phi_tup])
return move_tup[1]
# TEST Me - This needs a lot of work because of single-pt scans
def export_to_fullprof(self, exp_number, scan_roi_list, user_header,
export_absorption, fullprof_file_name, high_precision,
integration_direction='vertical'):
"""
Export peak intensities to Fullprof data file
:param exp_number:
:param scan_roi_list: list of 2-tuple: (1) scan number (2) roi/mask name
:param user_header:
:param export_absorption: requiring UB matrix
:param fullprof_file_name:
:param high_precision: flag to write peak intensity as f18.5 if true; otherwise, output as f8.2
:param integration_direction:
:return: file content generated with the 'mixed' integration algorithm
"""
# check
assert isinstance(exp_number, int), 'Experiment number must be an integer.'
assert isinstance(scan_roi_list, list), 'Scan number list must be a list but not %s.' \
'' % str(type(scan_roi_list))
assert len(scan_roi_list) > 0, 'Scan number list must larger than 0'
# get wave-length
scan_number_list = [t[0] for t in scan_roi_list]
exp_wave_length = self.get_wave_length(exp_number, scan_number_list)
# get the information whether there is any k-shift vector specified by user
# form k-shift and peak intensity information
scan_kindex_dict = dict()
k_shift_dict = dict()
for k_index in self._kShiftDict.keys():
tup_value = self._kShiftDict[k_index]
k_shift_dict[k_index] = tup_value[0]
for scan_number in tup_value[1]:
scan_kindex_dict[scan_number] = k_index
# END-FOR (scan_number)
# END-FOR (_kShiftDict)
error_message = 'Number of scans with k-shift must either be 0 (no shift at all) or ' \
'equal to or larger than the number scans to export.'
assert len(scan_kindex_dict) == 0 or len(scan_kindex_dict) >= len(scan_roi_list), error_message
# form peaks
no_shift = len(scan_kindex_dict) == 0
# get ub matrix in the case of export absorption
if export_absorption:
try:
ub_matrix = self.get_ub_matrix(exp_number)
except RuntimeError as err:
raise RuntimeError('It is required to have UB matrix set up for exporting absorption\n(error '
'message: {0}'.format(err))
else:
ub_matrix = None
mixed_content = 'Nothing is written'
for algorithm_type in ['simple', 'mixed', 'gauss']:
# set list of peaks for exporting
peaks = list()
for scan_number, roi_name in scan_roi_list:
# create a single peak information dictionary for
peak_dict = dict()
# get peak-info object
if (exp_number, scan_number) in self._myPeakInfoDict:
peak_info = self._myPeakInfoDict[exp_number, scan_number]
else:
pt_number = 1
peak_info = self._single_pt_integration_dict[exp_number, scan_number, pt_number, roi_name]
peak_info = peak_info[integration_direction]
# get HKL
try:
peak_dict['hkl'] = peak_info.get_hkl(user_hkl=True)
# self._myPeakInfoDict[(exp_number, scan_number)].get_hkl(user_hkl=True)
except RuntimeError as run_err:
raise RuntimeError('Peak index error: {0}.'.format(run_err))
intensity, std_dev = peak_info.get_intensity(algorithm_type, lorentz_corrected=True)
# self._myPeakInfoDict[(exp_number, scan_number)]
if intensity < std_dev:
# error is huge, very likely bad gaussian fit
if self._debugPrintMode:
print('[INFO] Integration Type {0}: Scan {1} Intensity {2} < Std Dev {3}. '
'Excluded from exporting.'.format(algorithm_type, scan_number, intensity, std_dev))
continue
# END-IF
peak_dict['intensity'] = intensity
peak_dict['sigma'] = std_dev
if no_shift:
peak_dict['kindex'] = 0
else:
peak_dict['kindex'] = scan_kindex_dict[scan_number]
if export_absorption:
# calculate absorption correction
spice_ub = convert_mantid_ub_to_spice(ub_matrix)
up_cart, us_cart = absorption.calculate_absorption_correction_2(
exp_number, scan_number, spice_ub)
peak_dict['up'] = up_cart
peak_dict['us'] = us_cart
# append peak (in dict) to peaks
peaks.append(peak_dict)
# END-FOR (scan_number)
# get file name for this type
this_file_name = fullprof_file_name.split('.')[0] + '_' + algorithm_type + '.dat'
file_content = fputility.write_scd_fullprof_kvector(
user_header=user_header, wave_length=exp_wave_length,
k_vector_dict=k_shift_dict, peak_dict_list=peaks,
fp_file_name=this_file_name, with_absorption=export_absorption,
high_precision=high_precision)
if algorithm_type == 'mixed':
mixed_content = file_content
continue
# END-FOR
return mixed_content
def export_md_data(self, exp_number, scan_number, base_file_name):
"""
Export MD data to an external file
:param exp_number:
:param scan_number:
:param base_file_name:
:return: output file name
"""
# get output file name and source workspace name
out_file_name = os.path.join(self._workDir, base_file_name)
status, pt_list = self.get_pt_numbers(exp_number, scan_number)
assert status, pt_list
md_ws_name = get_merged_md_name(self._instrumentName, exp_number, scan_number, pt_list)
temp_out_ws = base_file_name
mantidsimple.ConvertCWSDMDtoHKL(InputWorkspace=md_ws_name,
UBMatrix='1., 0., 0., 0., 1., 0., 0., 0., 1',
OutputWorkspace=temp_out_ws,
QSampleFileName=out_file_name)
mantidsimple.DeleteWorkspace(Workspace=temp_out_ws)
return out_file_name
def find_scans_by_2theta(self, exp_number, two_theta, resolution, excluded_scans):
"""
find scans by 2theta (same or similar)
:param exp_number:
:param two_theta:
:param resolution:
:param excluded_scans:
:return:
"""
# check inputs
assert isinstance(exp_number, int), 'Exp number {0} must be integer'.format(exp_number)
assert isinstance(two_theta, float), '2-theta {0} must be a float.'.format(two_theta)
assert isinstance(resolution, float), 'Resolution {0} must be a float.'.format(resolution)
assert isinstance(excluded_scans, list), 'Excluded scans {0} must be a list.'.format(excluded_scans)
# get the list of scans in the memory
have_change = False
for scan_sum in self._scanSummaryList:
# get scan number
scan_number = scan_sum[1]
pt_number = scan_sum[2]
if scan_number in self._scan_2theta_set:
# already parsed
continue
have_change = True
# get 2theta
two_theta_i = float(self.get_sample_log_value(exp_number, scan_number, pt_number, '2theta'))
self._two_theta_scan_dict[two_theta_i] = scan_number
self._scan_2theta_set.add(scan_number)
# END-FOR
# check as an exception whether there are multiple scans with exactly same two theta
if len(self._two_theta_scan_dict) != len(self._scan_2theta_set):
raise RuntimeError('Exception case: scans with exactly same 2theta! FindScanBy2Theta fails!')
# sort 2thetas and index two thetas within a certain range
two_theta_list = numpy.array(sorted(self._two_theta_scan_dict.keys()))
min_2theta = two_theta - resolution
max_2theta = two_theta + resolution
min_index = bisect.bisect_left(two_theta_list, min_2theta)
max_index = bisect.bisect_left(two_theta_list, max_2theta)
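# Example with assumed values: two_theta_list = [10.0, 15.0, 20.1], two_theta = 20.0, resolution = 0.2
# -> min_2theta = 19.8, max_2theta = 20.2, min_index = 2, max_index = 3, so only the 20.1-degree scan matches.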
# debug output
if have_change:
pass
# print('[DB...BAT] Dict size = {0}; Scan set size = {1}'.format(len(self._two_theta_scan_dict),
# len(self._scan_2theta_set)))
# print('[DB...BAT] 2theta list: {0}'.format(two_theta_list))
# print ('[DB..BAT] Input 2theta = {0}; 2-thetas in range: {1}'
# ''.format(two_theta, two_theta_list[min_index:max_index]))
# print ('[DB...BAT] index range: {0}, {1}'.format(min_index, max_index))
scans_set = set([self._two_theta_scan_dict[two_theta_j] for two_theta_j in two_theta_list[min_index:max_index]])
# print ('[DB...BAT] Find scans: {0} Excluded scans {1}'.format(scans_set, set(excluded_scans)))
scans_set = scans_set - set(excluded_scans)
# print ('[DB...BAT] Find scans: {0}'.format(scans_set))
# matched scans by removing single-pt scans
matched_scans = list(scans_set)
for scan_number in list(matched_scans):  # iterate over a copy so removal below is safe
spice_table = self._get_spice_workspace(exp_number, scan_number)
if spice_table.rowCount() == 1:
matched_scans.remove(scan_number)
return matched_scans
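# Illustration (editorial, not from the original source): the 2theta window is resolved with
# bisect_left on the sorted 2theta list, so a scan sitting exactly at two_theta + resolution
# falls outside the slice. For example, assuming
# two_theta_list = [10.0, 10.5, 11.0, 12.0], two_theta = 10.6 and resolution = 0.5:
# bisect_left(two_theta_list, 10.1) -> 1 and bisect_left(two_theta_list, 11.1) -> 3,
# so only the scans at 10.5 and 11.0 degrees are matched.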
def get_experiment(self):
"""
Get experiment number
:return:
"""
return self._expNumber
def get_pt_numbers(self, exp_no, scan_no):
""" Get Pt numbers (as a list) for a scan in an experiment
:param exp_no:
:param scan_no:
:return: (Boolean, Object) as (status, pt number list/error message)
"""
# Check
if exp_no is None:
exp_no = self._expNumber
assert isinstance(exp_no, int), 'Experiment number {0} must be an integer'.format(exp_no)
assert isinstance(scan_no, int), 'Scan number {0} must be an integer'.format(scan_no)
# Get workspace
status, ret_obj = self.load_spice_scan_file(exp_no, scan_no)
if status is False:
return False, ret_obj
else:
table_ws_name = ret_obj
table_ws = AnalysisDataService.retrieve(table_ws_name)
# Get column for Pt.
col_name_list = table_ws.getColumnNames()
if 'Pt.' not in col_name_list:
return False, 'No column with name Pt. can be found in SPICE table.'
i_pt = col_name_list.index('Pt.')
assert 0 <= i_pt < len(col_name_list), 'Impossible to have assertion error!'
pt_number_list = []
num_rows = table_ws.rowCount()
for i in range(num_rows):
pt_number = table_ws.cell(i, i_pt)
pt_number_list.append(pt_number)
return True, pt_number_list
def get_raw_detector_counts(self, exp_no, scan_no, pt_no):
"""
Get counts on raw detector
:param exp_no:
:param scan_no:
:param pt_no:
:return: 2D numpy array of detector counts, or (False, error message) if the raw data is not loaded
"""
# Get workspace (in memory or loading)
raw_ws = self.get_raw_data_workspace(exp_no, scan_no, pt_no)
if raw_ws is None:
return False, 'Raw data for Exp %d Scan %d Pt %d is not loaded.' % (exp_no, scan_no, pt_no)
# Convert to numpy array
det_shape = (self._detectorSize[0], self._detectorSize[1])
array2d = numpy.ndarray(shape=det_shape, dtype='float')
for i in range(det_shape[0]):
for j in range(det_shape[1]):
array2d[i][j] = raw_ws.readY(j * det_shape[0] + i)[0]
# Flip the 2D array to look detector from sample
array2d = numpy.flipud(array2d)
return array2d
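# Editorial note (assumption): the loop above relies on the SPICE XML loader storing one pixel
# per spectrum in column-major order, i.e. pixel (row=i, col=j) lives at workspace index
# j * n_rows + i. For a 256 x 256 detector, pixel (i=3, j=2) would therefore be read from
# workspace index 2 * 256 + 3 = 515 before the array is flipped for display.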
def get_refined_ub_matrix(self):
"""
Get refined UB matrix and lattice parameters
:return:
"""
assert isinstance(self._refinedUBTup, tuple)
assert len(self._refinedUBTup) == 4
return self._refinedUBTup[1], self._refinedUBTup[2], self._refinedUBTup[3]
def get_region_of_interest(self, roi_name):
""" Get region of interest
:param roi_name: name of the ROI
:return: region of interest
"""
assert isinstance(roi_name, str),\
'ROI name {0} must be a string or None but not a {1}.'.format(roi_name, type(roi_name))
if roi_name not in self._roiDict:
# ROI: not saved
raise RuntimeError('ROI {0} is not in ROI dictionary which has keys {1}'
''.format(roi_name, self._roiDict.keys()))
# check...
lower_left_corner = self._roiDict[roi_name].lower_left_corner
upper_right_corner = self._roiDict[roi_name].upper_right_corner
if lower_left_corner is None or upper_right_corner is None:
raise RuntimeError('ROI positions not set')
return lower_left_corner, upper_right_corner
def get_region_of_interest_list(self):
"""
Get the list of all the ROIs defined
:return:
"""
return sorted(self._roiDict.keys())
def get_sample_log_value(self, exp_number, scan_number, pt_number, log_name):
"""
Get sample log's value from merged data!
:param exp_number:
:param scan_number:
:param pt_number:
:param log_name:
:return: float
"""
assert isinstance(exp_number, int)
assert isinstance(scan_number, int)
assert isinstance(pt_number, int)
assert isinstance(log_name, str)
# access data from SPICE table
# TODO FIXME THIS IS A HACK!
if log_name == '2theta':
spice_table_name = get_spice_table_name(exp_number, scan_number)
print ('[DB...BAT] Scan {0} Spice Table {1}'.format(scan_number, spice_table_name))
spice_table_ws = AnalysisDataService.retrieve(spice_table_name)
log_value = spice_table_ws.toDict()[log_name][0]
else:
try:
status, pt_number_list = self.get_pt_numbers(exp_number, scan_number)
assert status
md_ws_name = get_merged_md_name(self._instrumentName, exp_number,
scan_number, pt_number_list)
md_ws = AnalysisDataService.retrieve(md_ws_name)
except KeyError as ke:
return 'Unable to find log value %s due to %s.' % (log_name, str(ke))
log_value = md_ws.getExperimentInfo(0).run().getProperty(log_name).value
return log_value
def get_merged_data(self, exp_number, scan_number, pt_number_list):
"""
Get merged data in format of numpy.ndarray to plot
:param exp_number:
:param scan_number:
:param pt_number_list:
:return: 2-tuple: Q-sample array of shape (n, 3) and the corresponding counts array
"""
# check
assert isinstance(exp_number, int) and isinstance(scan_number, int)
assert isinstance(pt_number_list, list)
# get MDEventWorkspace
md_ws_name = get_merged_md_name(self._instrumentName, exp_number, scan_number, pt_number_list)
assert AnalysisDataService.doesExist(md_ws_name)
# call ConvertCWMDtoHKL to write out the temp file
base_name = 'temp_%d_%d_rand%d' % (exp_number, scan_number, random.randint(1, 10000))
out_file_name = self.export_md_data(exp_number, scan_number, base_name)
# load the merged data back from the ASCII data file
q_space_array, counts_array = load_hb3a_md_data(out_file_name)
return q_space_array, counts_array
def get_merged_scans(self):
"""
Get merged scans and Pts.
:return:
"""
return self._mergedWSManager[:]
def get_peak_info(self, exp_number, scan_number, pt_number=None):
"""
get the PeakInfo (peak processing record) instance for the given experiment, scan and optional Pt number
:param exp_number: experiment number
:param scan_number:
:param pt_number:
:return: peakprocesshelper.PeakProcessRecord or None
"""
# Check for type
assert isinstance(exp_number, int), 'Experiment %s must be an integer but not of type %s.' \
'' % (str(exp_number), type(exp_number))
assert isinstance(scan_number, int), 'Scan number %s must be an integer but not of type %s.' \
'' % (str(scan_number), type(scan_number))
assert isinstance(pt_number, int) or pt_number is None, 'Pt number %s must be an integer or None, but ' \
'it is of type %s now.' % (str(pt_number),
type(pt_number))
# construct key
if pt_number is None:
p_key = (exp_number, scan_number)
else:
p_key = (exp_number, scan_number, pt_number)
# Check for existence
if p_key in self._myPeakInfoDict:
ret_value = self._myPeakInfoDict[p_key]
else:
ret_value = None
return ret_value
def get_peaks_integrated_intensities(self, exp_number, scan_number, pt_list):
"""
Get the integrated intensities for a peak
Requirements:
1. the Pts in the scan must have been merged and intensity is calculated.
2. experiment number and scan number must be integers
Guarantees: get the x-y plot for intensities of all Pts. X is pt number, Y is for intensity
:param exp_number:
:param scan_number:
:param pt_list:
:return:
"""
# check
assert isinstance(exp_number, int)
assert isinstance(scan_number, int)
assert isinstance(pt_list, list) or pt_list is None
# deal with pt list if it is None
if pt_list is None:
status, pt_list = self.get_pt_numbers(exp_number, scan_number)
assert status
int_peak_ws_name = get_integrated_peak_ws_name(exp_number, scan_number, pt_list)
assert AnalysisDataService.doesExist(int_peak_ws_name)
int_peak_ws = AnalysisDataService.retrieve(int_peak_ws_name)
num_peaks = int_peak_ws.getNumberPeaks()
array_size = num_peaks
vec_x = numpy.ndarray(shape=(array_size,))
vec_y = numpy.ndarray(shape=(array_size,))
for index in range(array_size):
peak_i = int_peak_ws.getPeak(index)
# Note: run number in merged workspace is a combination of pt number and scan number
# so it should have 1000 divided for the correct pt number
pt_number = peak_i.getRunNumber() % 1000
intensity = peak_i.getIntensity()
vec_x[index] = pt_number
vec_y[index] = intensity
# END-FOR
return vec_x, vec_y
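# Example (editorial sketch): the merged workspace encodes scan and Pt number into a single
# run number as scan * 1000 + pt, so a run number of 38007 is decoded above as
# pt = 38007 % 1000 = 7 (the scan number would be 38007 // 1000 = 38).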
def get_peak_integration_parameters(self, xlabel='2theta', ylabel=None, with_error=True):
"""
get the parameters from peak integration
:param xlabel: parameter name for x value
:param ylabel: parameter name for y value
:param with_error: If true, then output error
:return:
"""
# convert all kinds of y-label to a list of strings for y-label
if ylabel is None:
ylabel = ['sigma']
elif isinstance(ylabel, str):
ylabel = [ylabel]
# create list of output
param_list = list()
for (exp_number, scan_number) in self._myPeakInfoDict.keys():
peak_int_info = self._myPeakInfoDict[exp_number, scan_number]
# x value
try:
x_value = peak_int_info.get_parameter(xlabel)[0]
except RuntimeError as run_err:
print ('[ERROR] Exp {} Scan {}: {}'.format(exp_number, scan_number, run_err))
continue
# set up
scan_i = [x_value]
for param_name in ylabel:
if param_name.lower() == 'scan':
# scan number
y_value = scan_number
scan_i.append(y_value)
else:
# parameter name
y_value, e_value = peak_int_info.get_parameter(param_name.lower())
scan_i.append(y_value)
if with_error:
scan_i.append(e_value)
# END-FOR
param_list.append(scan_i)
# END-FOR
if len(param_list) == 0:
raise RuntimeError('No integrated peak is found')
# convert to a matrix
param_list.sort()
xye_matrix = numpy.array(param_list)
return xye_matrix
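# Layout of the returned matrix (editorial note): rows are sorted by the x value and each row is
# [x, y1, (e1), y2, (e2), ...] following the order of ylabel, where an error column is appended
# only when with_error is True and the parameter is not the scan number. For instance,
# xlabel='2theta' with ylabel=['sigma'] gives rows of [2theta, sigma, sigma_error].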
def generate_mask_workspace(self, exp_number, scan_number, roi_start, roi_end, mask_tag=None):
""" Generate a mask workspace
:param exp_number:
:param scan_number:
:param roi_start:
:param roi_end:
:return:
"""
# assert ...
assert isinstance(exp_number, int), 'Experiment number {0} ({1}) must be an integer.' \
''.format(exp_number, type(exp_number))
assert isinstance(scan_number, int), 'Scan number {0} ({1}) must be an integer.' \
''.format(scan_number, type(scan_number))
# create an xml file
mask_file_name = get_mask_xml_temp(self._workDir, exp_number, scan_number)
generate_mask_file(file_path=mask_file_name,
ll_corner=roi_start,
ur_corner=roi_end)
# check reference workspace for mask workspace
if self._refWorkspaceForMask is None:
return False, 'There is no reference workspace. Plot a Pt. first!'
elif AnalysisDataService.doesExist(self._refWorkspaceForMask) is False:
return False, 'Previous reference workspace has been deleted. Plot a Pt. first'
# get the name of the mask workspace to be loaded to
if mask_tag is None:
# use default name
mask_ws_name = get_mask_ws_name(exp_number, scan_number)
else:
# use given name
mask_ws_name = str(mask_tag)
# load the mask workspace
mantidsimple.LoadMask(Instrument='HB3A',
InputFile=mask_file_name,
OutputWorkspace=mask_ws_name,
RefWorkspace=self._refWorkspaceForMask)
mantidsimple.InvertMask(InputWorkspace=mask_ws_name,
OutputWorkspace=mask_ws_name)
# register
self._roiDict[mask_tag].set_mask_workspace_name(mask_ws_name)
return True, mask_tag
def get_working_directory(self):
"""
get working directory
:return:
"""
return self._workDir
def group_workspaces(self, exp_number, group_name):
"""
:return:
"""
# Find out the input workspace name
ws_names_str = ''
for key in self._myRawDataWSDict.keys():
if key[0] == exp_number:
ws_names_str += '%s,' % self._myRawDataWSDict[key].name()
for key in self._mySpiceTableDict.keys():
if key[0] == exp_number:
exp_number, scan_number = key
spice_table_name = get_spice_table_name(exp_number, scan_number)
ws_names_str += '%s,' % spice_table_name # self._mySpiceTableDict[key].name()
# Check
if len(ws_names_str) == 0:
return False, 'No workspace is found for experiment %d.' % exp_number
# Remove last ','
ws_names_str = ws_names_str[:-1]
# Group
mantidsimple.GroupWorkspaces(InputWorkspaces=ws_names_str,
OutputWorkspace=group_name)
return
def has_integrated_peak(self, exp_number, scan_number, masked, pt_list=None,
normalized_by_monitor=False, normalized_by_time=False):
""" Check whether the peak is integrated as designated
:param exp_number:
:param scan_number:
:param masked:
:param pt_list:
:param normalized_by_monitor:
:param normalized_by_time:
:return:
"""
# check requirements
assert isinstance(exp_number,int), 'Experiment number must be an integer but not %s.' \
'' % str(type(exp_number))
assert isinstance(scan_number, int), 'Scan number must be an integer but not %s.' \
'' % str(type(scan_number))
# get default Pt list if required
if pt_list is None:
status, ret_obj = self.get_pt_numbers(exp_number, scan_number)
if status is False:
raise RuntimeError(ret_obj)
pt_list = ret_obj
# END-IF
assert isinstance(pt_list, list) and len(pt_list) > 0
peak_ws_name = get_integrated_peak_ws_name(exp_number, scan_number, pt_list, masked,
normalized_by_monitor, normalized_by_time)
return AnalysisDataService.doesExist(peak_ws_name)
def has_merged_data(self, exp_number, scan_number, pt_number_list=None):
"""
Check whether the data has been merged to an MDEventWorkspace
:param exp_number:
:param scan_number:
:param pt_number_list:
:return:
"""
# check and retrieve pt number list
assert isinstance(exp_number, int) and isinstance(scan_number, int)
if pt_number_list is None:
status, pt_number_list = self.get_pt_numbers(exp_number, scan_number)
if status is False:
return False
else:
assert isinstance(pt_number_list, list)
# get MD workspace name
md_ws_name = get_merged_md_name(self._instrumentName, exp_number, scan_number, pt_number_list)
return AnalysisDataService.doesExist(md_ws_name)
def has_peak_info(self, exp_number, scan_number, pt_number=None):
""" Check whether there is a peak found...
:param exp_number:
:param scan_number:
:param pt_number:
:return:
"""
# Check for type
assert isinstance(exp_number, int)
assert isinstance(scan_number, int)
assert isinstance(pt_number, int) or pt_number is None
# construct key
if pt_number is None:
p_key = (exp_number, scan_number)
else:
p_key = (exp_number, scan_number, pt_number)
return p_key in self._myPeakInfoDict
def has_roi_generated(self, roi_name):
"""
check whether a MaskWorkspace has been generated for an ROI
:param roi_name:
:return:
"""
# check input
assert isinstance(roi_name, str), 'ROI name {0} must be a string but not a {1}'.format(roi_name, type(roi_name))
# check whether it is in the dictionary and has a mask workspace set
has = True
if roi_name not in self._roiDict:
has = False
elif self._roiDict[roi_name].mask_workspace is None:
has = False
return has
def import_2theta_gauss_sigma_file(self, twotheta_sigma_file_name):
""" import a 2theta-sigma column file
:param twotheta_sigma_file_name:
:return: (numpy.array, numpy.array) : vector X and vector y
"""
assert isinstance(twotheta_sigma_file_name, str), 'Input file name {0} must be a string but not a {1}.' \
''.format(twotheta_sigma_file_name,
type(twotheta_sigma_file_name))
if os.path.exists(twotheta_sigma_file_name) is False:
raise RuntimeError('2theta-sigma file {0} does not exist.'.format(twotheta_sigma_file_name))
vec_2theta, vec_sigma = numpy.loadtxt(twotheta_sigma_file_name, delimiter=' ', usecols=(0, 1), unpack=True)
# TODO - 20180814 - shall be noted as single-pt scan...
self._two_theta_sigma = vec_2theta, vec_sigma
return vec_2theta, vec_sigma
def index_peak(self, ub_matrix, scan_number, allow_magnetic=False):
""" Index peaks in a Pt. by create a temporary PeaksWorkspace which contains only 1 peak
:param ub_matrix: numpy.ndarray (3, 3)
:param scan_number:
:param allow_magnetic: flag to allow magnetic reflections
:return: boolean, object (list of HKL or error message)
"""
# Check
assert isinstance(ub_matrix, numpy.ndarray), 'UB matrix must be an ndarray'
assert ub_matrix.shape == (3, 3), 'UB matrix must be a 3x3 matrix.'
assert isinstance(scan_number, int), 'Scan number must be in integer.'
# Find out the PeakInfo
exp_number = self._expNumber
peak_info = self.get_peak_info(exp_number, scan_number)
# Find out the peak workspace
status, pt_list = self.get_pt_numbers(exp_number, scan_number)
assert status
peak_ws_name = get_peak_ws_name(exp_number, scan_number, pt_list)
peak_ws = AnalysisDataService.retrieve(peak_ws_name)
assert peak_ws.getNumberPeaks() > 0
# Create a temporary peak workspace for indexing
temp_index_ws_name = 'TempIndexExp%dScan%dPeak' % (exp_number, scan_number)
mantidsimple.CreatePeaksWorkspace(NumberOfPeaks=0, OutputWorkspace=temp_index_ws_name)
temp_index_ws = AnalysisDataService.retrieve(temp_index_ws_name)
temp_index_ws.addPeak(peak_ws.getPeak(0))
virtual_peak = temp_index_ws.getPeak(0)
virtual_peak.setHKL(0, 0, 0)
virtual_peak.setQSampleFrame(peak_info.get_peak_centre_v3d())
# Set UB matrix to the peak workspace
ub_1d = ub_matrix.reshape(9,)
# Set UB
mantidsimple.SetUB(Workspace=temp_index_ws_name, UB=ub_1d)
# Note: IndexPeaks and CalculatePeaksHKL do the same job
# while IndexPeaks has more control on the output
if allow_magnetic:
tol = 0.5
else:
tol = 0.3
num_peak_index, error = mantidsimple.IndexPeaks(PeaksWorkspace=temp_index_ws_name,
Tolerance=tol,
RoundHKLs=False)
temp_index_ws = AnalysisDataService.retrieve(temp_index_ws_name)
if num_peak_index == 0:
return False, 'No peak can be indexed: {0}.'.format(error)
elif num_peak_index > 1:
raise RuntimeError('Case for PeaksWorkspace containing more than 1 peak is not '
'considered. Contact developer for this issue.')
else:
hkl_v3d = temp_index_ws.getPeak(0).getHKL()
hkl = numpy.array([hkl_v3d.X(), hkl_v3d.Y(), hkl_v3d.Z()])
# set HKL to peak
peak_info.set_hkl(hkl[0], hkl[1], hkl[2])
# delete temporary workspace
mantidsimple.DeleteWorkspace(Workspace=temp_index_ws_name)
return True, (hkl, error)
def integrate_scan_peak(self, exp_number, scan_number, peak_centre, mask_name, normalization,
scale_factor, background_pt_tuple):
"""
new way to integrate a peak in a scan
Note: it is going to replace "integrate_scan_peaks()"
:param exp_number:
:param scan_number:
:param peak_centre:
:param mask_name:
:param normalization:
:param scale_factor:
:param background_pt_tuple:
:return:
"""
# check inputs
assert isinstance(exp_number, int), 'Experiment number {0} must be an integer but not a {1}.' \
''.format(exp_number, type(exp_number))
assert isinstance(scan_number, int), 'Scan number {0} must be an integer but not a {1}.' \
''.format(scan_number, type(scan_number))
assert isinstance(mask_name, str), 'Mask name {0} must be a string but not a {1}.' \
''.format(mask_name, type(mask_name))
assert isinstance(normalization, str), 'Normalization type {0} must be a string but not a {1}.' \
''.format(normalization, type(normalization))
assert isinstance(scale_factor, float) or isinstance(scale_factor, int),\
'Scale factor {0} must be a float or integer but not a {1}.'.format(scale_factor, type(scale_factor))
assert len(peak_centre) == 3, 'Peak center {0} must have 3 elements for (Qx, Qy, Qz).'.format(peak_centre)
assert len(background_pt_tuple) == 2, 'Background tuple {0} must be of length 2.'.format(background_pt_tuple)
# get input MDEventWorkspace name for merged scan
status, ret_obj = self.get_pt_numbers(exp_number, scan_number)
if status:
pt_list = ret_obj
else:
raise RuntimeError('Unable to get Pt. list from Exp {0} Scan {1} due to {2}'
''.format(exp_number, scan_number, ret_obj))
md_ws_name = get_merged_md_name(self._instrumentName, exp_number, scan_number, pt_list)
# get the TableWorkspace name for Spice
spice_table_ws = get_spice_table_name(exp_number, scan_number)
# output PeaksWorkspace name and MaskWorkspace
if len(mask_name) > 0:
mask_ws_name = self.check_generate_mask_workspace(exp_number, scan_number, mask_name, check_throw=True)
else:
mask_ws_name = None
peak_ws_name = get_integrated_peak_ws_name(exp_number, scan_number, pt_list, mask_name)
# peak center
int_peak_dict = peak_integration_utility.integrate_peak_full_version(scan_md_ws_name=md_ws_name,
spice_table_name=spice_table_ws,
output_peak_ws_name=peak_ws_name,
peak_center=peak_centre,
mask_workspace_name=mask_ws_name,
norm_type=normalization,
intensity_scale_factor=scale_factor,
background_pt_tuple=background_pt_tuple)
return int_peak_dict
def integrate_scan_peaks(self, exp, scan, peak_radius, peak_centre,
merge_peaks=True, use_mask=False,
normalization='', mask_ws_name=None,
scale_factor=1.00, background_pt_tuple=None):
"""
:param exp:
:param scan:
:param peak_radius:
:param peak_centre: a float radius or None for not using
:param merge_peaks: If True, merge all the Pts and return a single integrated peak value;
otherwise, integrate the peak for each Pt. separately.
:param use_mask:
:param normalization: normalization set up (by time or ...)
:param mask_ws_name: mask workspace name or None
:param scale_factor: integrated peaks' scaling factor
:return: dictionary of Pts.
"""
# check
assert isinstance(exp, int)
assert isinstance(scan, int)
assert isinstance(peak_radius, float) or peak_radius is None
assert len(peak_centre) == 3
assert isinstance(merge_peaks, bool)
peak_int_dict = self.integrate_scan_peak(exp_number=exp, scan_number=scan, peak_centre=peak_centre,
mask_name=mask_ws_name, normalization=normalization,
scale_factor=scale_factor, background_pt_tuple=background_pt_tuple)
#
# store the data into peak info
if (exp, scan) not in self._myPeakInfoDict:
raise RuntimeError('Exp %d Scan %d is not recorded in PeakInfo-Dict' % (exp, scan))
self._myPeakInfoDict[(exp, scan)].set_pt_intensity(peak_int_dict)
return True, peak_int_dict
@staticmethod
def gauss_correction_peak_intensity(pt_dict):
"""
fit a peak along Pt. with Gaussian and thus calculate background automatically
:param pt_dict:
:return: 3-tuple (intensity, background and information string)
"""
# check
assert isinstance(pt_dict, dict), 'Input must be a dictionary but not {0}'.format(type(pt_dict))
# convert to vector
tup_list = list()
for pt in pt_dict.keys():
tup_list.append((pt, pt_dict[pt]))
tup_list.sort()
list_x = list()
list_y = list()
for tup in tup_list:
list_x.append(float(tup[0]))
list_y.append(float(tup[1]))
vec_x = numpy.array(list_x)
vec_y = numpy.array(list_y)
vec_e = numpy.sqrt(vec_y)
# do fit
error, gauss_params, model_vec_y = peak_integration_utility.fit_gaussian_linear_background(vec_x, vec_y, vec_e)
x0, gauss_sigma, gauss_a, gauss_bkgd = gauss_params
if not (0 < x0 < vec_x[-1]):
raise RuntimeError('Fitted center of the peak {0} is out of range, which is not correct'.format(x0))
if gauss_a <= 0.:
raise RuntimeError('Fitted peak height {0} is negative!'.format(gauss_a))
# calculate the peak intensity
peak_intensity = peak_integration_utility.calculate_peak_intensity_gauss(gauss_a, gauss_sigma)
# information
info_str = 'Fit error = {0}: a = {1}, x0 = {2}, sigma = {3}, b = {4}'.format(error, gauss_a, x0, gauss_sigma,
gauss_bkgd)
return peak_intensity, gauss_bkgd, info_str
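# Editorial note (assumption): calculate_peak_intensity_gauss() is expected to return the analytic
# area of the fitted Gaussian, i.e. intensity = a * sigma * sqrt(2 * pi) for a peak of the form
# a * exp(-(x - x0)**2 / (2 * sigma**2)), so the reported intensity is the background-subtracted
# integral over Pt. rather than a simple sum of counts.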
def integrate_single_pt_scans_detectors_counts(self, exp_number, scan_number_list, roi_name, integration_direction,
fit_gaussian):
"""
integrate the detector counts of a list of single-pt scans, optionally fitting each with a Gaussian function
:param exp_number:
:param scan_number_list:
:param roi_name:
:param integration_direction:
:param fit_gaussian:
:return: a dictionary of peak height
"""
# check inputs
check_list('Scan numbers', scan_number_list)
# get the workspace key. if it does exist, it means there is no need to sum the data but just get from dict
ws_record_key = self.generate_single_pt_scans_key(exp_number, scan_number_list, roi_name,
integration_direction)
print ('[DB...BAT] Retrieve ws record key: {}'.format(ws_record_key))
if ws_record_key in self._single_pt_matrix_dict:
# it does exist. get the workspace name
integration_manager = self._single_pt_matrix_dict[ws_record_key]
out_ws_name = integration_manager.get_workspace()
print ('[DB...TRACE] workspace key {} does exist: workspace name = {}'
''.format(ws_record_key, out_ws_name))
else:
# it does not exist. sum over all the scans and create the workspace
out_ws_name = 'Exp{}_Scan{}-{}_{}_{}'.format(exp_number, scan_number_list[0], scan_number_list[-1],
roi_name, integration_direction)
print('[DB...TRACE] workspace key {} does not exist. Integrate and generate workspace {}.'
''.format(ws_record_key, out_ws_name))
# initialize the vectors to form a Mantid Workspace2D
appended_vec_x = None
appended_vec_y = None
appended_vec_e = None
scan_spectrum_map = dict()
spectrum_scan_map = dict()
for ws_index, scan_number in enumerate(scan_number_list):
scan_spectrum_map[scan_number] = ws_index
spectrum_scan_map[ws_index] = scan_number
pt_number = 1
self.integrate_detector_image(exp_number, scan_number, pt_number, roi_name,
integration_direction=integration_direction,
fit_gaussian=fit_gaussian)
# create a workspace
det_integration_info = \
self._single_pt_integration_dict[(exp_number, scan_number, pt_number, roi_name)][
integration_direction]
vec_x, vec_y = det_integration_info.get_vec_x_y()
if appended_vec_x is None:
appended_vec_x = vec_x
appended_vec_y = vec_y
appended_vec_e = numpy.sqrt(vec_y)
else:
appended_vec_x = numpy.concatenate((appended_vec_x, vec_x), axis=0)
appended_vec_y = numpy.concatenate((appended_vec_y, vec_y), axis=0)
appended_vec_e = numpy.concatenate((appended_vec_e, numpy.sqrt(vec_y)), axis=0)
# END-IF
# END-FOR
# create workspace
mantidsimple.CreateWorkspace(DataX=appended_vec_x, DataY=appended_vec_y,
DataE=appended_vec_e, NSpec=len(scan_number_list),
OutputWorkspace=out_ws_name)
# record the workspace
integration_manager = SinglePtScansIntegrationOperation(exp_number, scan_number_list, out_ws_name,
scan_spectrum_map, spectrum_scan_map)
self._single_pt_matrix_dict[ws_record_key] = integration_manager
self._current_single_pt_integration_key = ws_record_key
# END-IF-ELSE
# about result: peak height
peak_height_dict = dict()
for scan_number in scan_number_list:
peak_height_dict[scan_number] = 0.
# fit gaussian
if fit_gaussian:
# for mantid Gaussian, 'peakindex', 'Height', 'PeakCentre', 'Sigma', 'A0', 'A1', 'chi2'
fit_result_dict, model_ws_name = peak_integration_utility.fit_gaussian_linear_background_mtd(out_ws_name)
# digest fit parameters
for ws_index in sorted(fit_result_dict.keys()):
scan_number = integration_manager.get_scan_number(ws_index, from_zero=True)
integrate_record_i = \
self._single_pt_integration_dict[(exp_number, scan_number, 1, roi_name)][integration_direction]
integrate_record_i.set_fit_cost(fit_result_dict[ws_index]['chi2'])
integrate_record_i.set_fit_params(x0=fit_result_dict[ws_index]['PeakCentre'],
sigma=fit_result_dict[ws_index]['Sigma'],
a0=fit_result_dict[ws_index]['A0'],
a1=fit_result_dict[ws_index]['A1'],
height=fit_result_dict[ws_index]['Height'])
peak_height_dict[scan_number] = fit_result_dict[ws_index]['Height']
# END-FOR
# workspace
integration_manager.set_model_workspace(model_ws_name)
# print ('[DB..BAT] SinglePt-Scan: cost = {0}, params = {1}, integrated = {2} +/- {3}'
# ''.format(cost, params, integrated_intensity, intensity_error))
# END-IF
return peak_height_dict
def integrate_detector_image(self, exp_number, scan_number, pt_number, roi_name, fit_gaussian,
integration_direction):
""" Integrate detector counts on detector image inside a given ROI.
Integration is either along X-direction (summing along rows) or Y-direction (summing along columns)
Peak fitting is removed from this method
:param exp_number:
:param scan_number:
:param pt_number:
:param roi_name:
:param fit_gaussian:
:param integration_direction: horizontal (integrate along X direction) or vertical (integrate along Y direction)
:return:
"""
# check data loaded with mask information
does_loaded = self.does_raw_loaded(exp_number, scan_number, pt_number, roi_name)
if not does_loaded:
# load SPICE table
self.load_spice_scan_file(exp_number, scan_number)
# load Pt xml
self.load_spice_xml_file(exp_number, scan_number, pt_number)
# END-IF
# check integration direction
assert isinstance(integration_direction, str) and integration_direction in ['vertical', 'horizontal'],\
'Integration direction {} (now of type {}) must be a string equal to either vertical or horizontal' \
''.format(integration_direction, type(integration_direction))
# check whether the first step integration been done
roi_key = exp_number, scan_number, pt_number, roi_name
if roi_key in self._single_pt_integration_dict\
and integration_direction in self._single_pt_integration_dict[roi_key]:
sum_counts = False
else:
sum_counts = True
# Get data and plot
if sum_counts:
raw_det_data = self.get_raw_detector_counts(exp_number, scan_number, pt_number)
assert isinstance(raw_det_data, numpy.ndarray), 'A matrix must be an ndarray but not {0}.' \
''.format(type(raw_det_data))
roi_lower_left, roi_upper_right = self.get_region_of_interest(roi_name)
data_in_roi = raw_det_data[roi_lower_left[0]:roi_upper_right[0], roi_lower_left[1]:roi_upper_right[1]]
print('IN ROI: Data set shape: {}'.format(data_in_roi.shape))
if integration_direction == 'horizontal':
# FIXME - This works!
# integrate peak along row
print(roi_lower_left[1], roi_upper_right[1])
vec_x = numpy.array(range(roi_lower_left[1], roi_upper_right[1]))
vec_y = raw_det_data[roi_lower_left[0]:roi_upper_right[0], roi_lower_left[1]:roi_upper_right[1]].sum(
axis=0)
elif integration_direction == 'vertical':
# integrate peak along column
# FIXME - This doesn't work!
print(roi_lower_left[0], roi_upper_right[0])
vec_x = numpy.array(range(roi_lower_left[0], roi_upper_right[0]))
vec_y = raw_det_data[roi_lower_left[0]:roi_upper_right[0], roi_lower_left[1]:roi_upper_right[1]].sum(
axis=1)
print('[DB...BAT] Vec X shape: {}; Vec Y shape: {}'.format(vec_x.shape, vec_y.shape))
else:
# wrong
raise NotImplementedError('It is supposed to be unreachable.')
# END-IF-ELSE
# initialize integration record
# get 2theta
two_theta = self.get_sample_log_value(self._expNumber, scan_number, pt_number, '2theta')
# create SinglePointPeakIntegration
integrate_record = SinglePointPeakIntegration(exp_number, scan_number, roi_name, pt_number, two_theta)
integrate_record.set_xy_vector(vec_x, vec_y, integration_direction)
# add the _single_pt_integration_dict()
if (exp_number, scan_number, pt_number, roi_name) not in self._single_pt_integration_dict:
self._single_pt_integration_dict[exp_number, scan_number, pt_number, roi_name] = dict()
self._single_pt_integration_dict[exp_number, scan_number, pt_number, roi_name][integration_direction] = \
integrate_record
# else:
# # retrieve the integration record from previously saved
# integrate_record = self._single_pt_integration_dict[roi_key][integration_direction]
# # vec_x, vec_y = integrate_record.get_vec_x_y()
# END-IF
# if fit_gaussian:
# cost, params, cov_matrix = peak_integration_utility.fit_gaussian_linear_background(vec_x, vec_y,
# numpy.sqrt(vec_y))
# gaussian_a = params[2]
# gaussian_sigma = params[1]
# integrated_intensity, intensity_error = \
# peak_integration_utility.calculate_peak_intensity_gauss(gaussian_a, gaussian_sigma)
# print ('[DB..BAT] SinglePt-Scan: cost = {0}, params = {1}, integrated = {2} +/- {3}'
# ''.format(cost, params, integrated_intensity, intensity_error))
#
# else:
# cost = -1
# params = dict()
# integrated_intensity = 0.
#
# integrate_record.set_fit_cost(cost)
# integrate_record.set_fit_params(x0=params[0], sigma=params[1], a=params[2], b=params[3])
return
@staticmethod
def load_scan_survey_file(csv_file_name):
""" Load scan survey from a csv file
:param csv_file_name:
:return: 2-tuple as header and list
"""
# check
assert isinstance(csv_file_name, str)
row_list = list()
# open file and parse
with open(csv_file_name, 'r') as csv_file:
reader = csv.reader(csv_file, delimiter=',', quotechar='|')
# get header
header = next(reader)
# body
for row in reader:
# check
assert isinstance(row, list)
assert len(row) == 7
# convert
counts = float(row[0])
scan = int(row[1])
pt = int(row[2])
h = float(row[3])
k = float(row[4])
l = float(row[5])
q_range = float(row[6])
# append
row_list.append([counts, scan, pt, h, k, l, q_range])
# END-FOR
# END-WITH
return header, row_list
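# Expected CSV layout (editorial sketch inferred from the parsing above): one header row followed
# by 7 comma-separated columns per scan, e.g.
#   Max Counts, Scan, Max Counts Pt, H, K, L, Q-range
#   1523.0, 38, 7, 1.0, 0.0, -2.0, 0.35
# The column names shown in the header row are illustrative only.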
def load_spice_scan_file(self, exp_no, scan_no, spice_file_name=None):
"""
Load a SPICE scan file to table workspace and run information matrix workspace.
:param exp_no:
:param scan_no:
:param spice_file_name:
:return: status (boolean), error message (string)
"""
# Default for exp_no
if exp_no is None:
exp_no = self._expNumber
# Check whether the workspace has been loaded
assert isinstance(exp_no, int)
assert isinstance(scan_no, int)
out_ws_name = get_spice_table_name(exp_no, scan_no)
if (exp_no, scan_no) in self._mySpiceTableDict:
return True, out_ws_name
# load the SPICE table data if the target workspace does not exist
if not AnalysisDataService.doesExist(out_ws_name):
# Form standard name for a SPICE file if name is not given
if spice_file_name is None:
spice_file_name = os.path.join(self._dataDir,
get_spice_file_name(self._instrumentName, exp_no, scan_no))
# Download SPICE file if necessary
if os.path.exists(spice_file_name) is False:
file_available, download_result = self.download_spice_file(exp_no, scan_no, over_write=True)
else:
file_available = True
download_result = None
if not file_available:
raise IOError('SPICE file for Exp {0} Scan {1} cannot be found at {2} or downloaded ({3})'
''.format(exp_no, scan_no, spice_file_name, download_result))
try:
spice_table_ws, info_matrix_ws = mantidsimple.LoadSpiceAscii(Filename=spice_file_name,
OutputWorkspace=out_ws_name,
RunInfoWorkspace='TempInfo')
mantidsimple.DeleteWorkspace(Workspace=info_matrix_ws)
except RuntimeError as run_err:
return False, 'Unable to load SPICE data %s due to %s' % (spice_file_name, str(run_err))
else:
spice_table_ws = AnalysisDataService.retrieve(out_ws_name)
# END-IF
# Store
self._add_spice_workspace(exp_no, scan_no, spice_table_ws)
return True, out_ws_name
def remove_pt_xml_workspace(self, exp_no, scan_no, pt_no):
"""
remove the Workspace2D loaded from SPICE XML detector file
:param exp_no:
:param scan_no:
:param pt_no:
:return:
"""
pt_ws_name = get_raw_data_workspace_name(exp_no, scan_no, pt_no)
if AnalysisDataService.doesExist(pt_ws_name):
AnalysisDataService.remove(pt_ws_name)
return
def load_spice_xml_file(self, exp_no, scan_no, pt_no, xml_file_name=None):
"""
Load SPICE's detector counts XML file from local data directory
Requirements: the SPICE detector counts file does exist. The XML file's name is given either
explicitly by user or formed according to a convention with given experiment number,
scan number and Pt number
:param exp_no:
:param scan_no:
:param pt_no:
:param xml_file_name:
:return:
"""
# Get XML file name with full path
if xml_file_name is None:
# use default
assert isinstance(exp_no, int) and isinstance(scan_no, int) and isinstance(pt_no, int)
xml_file_name = os.path.join(self._dataDir, get_det_xml_file_name(self._instrumentName,
exp_no, scan_no, pt_no))
# END-IF
# check whether file exists
assert os.path.exists(xml_file_name)
# retrieve and check SPICE table workspace
spice_table_ws = self._get_spice_workspace(exp_no, scan_no)
assert isinstance(spice_table_ws, mantid.dataobjects.TableWorkspace), 'SPICE table workspace must be a ' \
'TableWorkspace but not %s.' \
'' % type(spice_table_ws)
spice_table_name = spice_table_ws.name()
# load SPICE Pt. detector file
pt_ws_name = get_raw_data_workspace_name(exp_no, scan_no, pt_no)
try:
mantidsimple.LoadSpiceXML2DDet(Filename=xml_file_name,
OutputWorkspace=pt_ws_name,
SpiceTableWorkspace=spice_table_name,
PtNumber=pt_no)
# refresh the reference workspace used for masking if it is unset or has been deleted
if self._refWorkspaceForMask is None or AnalysisDataService.doesExist(self._refWorkspaceForMask) is False:
self._refWorkspaceForMask = pt_ws_name
except RuntimeError as run_err:
return False, str(run_err)
# Add data storage
assert AnalysisDataService.doesExist(pt_ws_name), 'Unable to locate workspace {0}.'.format(pt_ws_name)
raw_matrix_ws = AnalysisDataService.retrieve(pt_ws_name)
self._add_raw_workspace(exp_no, scan_no, pt_no, raw_matrix_ws)
# clear the mask/ROI information
self._myRawDataMasked[(exp_no, scan_no, pt_no)] = None
return True, pt_ws_name
@staticmethod
def merge_multiple_scans(scan_md_ws_list, scan_peak_centre_list, merged_ws_name):
"""
Merge multiple scans
:param scan_md_ws_list: List of MDWorkspace, each of which is for a scan.
:param scan_peak_centre_list: list of peak centres for all scans.
:param merged_ws_name:
:return:
"""
# check validity
assert isinstance(scan_md_ws_list, list), 'Scan MDWorkspace name list cannot be of type %s.' \
'' % type(scan_md_ws_list)
assert isinstance(scan_peak_centre_list, list), 'Scan peak center list cannot be of type %s.' \
'' % type(scan_peak_centre_list)
assert len(scan_md_ws_list) >= 2 and len(scan_md_ws_list) == len(scan_peak_centre_list),\
'Number of MDWorkspace %d and peak centers %d are not correct.' % (len(scan_md_ws_list),
len(scan_peak_centre_list))
assert isinstance(merged_ws_name, str), 'Target MDWorkspace name for merged scans %s (%s) must ' \
'be a string.' % (str(merged_ws_name), type(merged_ws_name))
# get the workspace
ws_name_list = ''
for i_ws, ws_name in enumerate(scan_md_ws_list):
# build the input MDWorkspace list
if i_ws != 0:
ws_name_list += ', '
ws_name_list += ws_name
# rebin the MDEventWorkspace to make all MDEventWorkspace have same MDGrid
md_ws = AnalysisDataService.retrieve(ws_name)
frame = md_ws.getDimension(0).getMDFrame().name()
if frame == 'HKL':
mantidsimple.SliceMD(InputWorkspace=ws_name,
AlignedDim0='H,-10,10,1',
AlignedDim1='K,-10,10,1',
AlignedDim2='L,-10,10,1',
OutputWorkspace=ws_name)
else:
mantidsimple.SliceMD(InputWorkspace=ws_name,
AlignedDim0='Q_sample_x,-10,10,1',
AlignedDim1='Q_sample_y,-10,10,1',
AlignedDim2='Q_sample_z,-10,10,1',
OutputWorkspace=ws_name)
# END-FOR
# merge
mantidsimple.MergeMD(InputWorkspaces=ws_name_list,
OutputWorkspace=merged_ws_name)
# get the unit of MD workspace
md_ws = AnalysisDataService.retrieve(scan_md_ws_list[0])
frame = md_ws.getDimension(0).getMDFrame().name()
# calculate the new binning boundaries; this does not affect the merge result and is only for the user's reference
axis0_range = list()
axis1_range = list()
axis2_range = list()
for i_peak, peak in enumerate(scan_peak_centre_list):
if i_peak == 0:
axis0_range = [peak[0], peak[0], 0.]
axis1_range = [peak[1], peak[1], 0.]
axis2_range = [peak[2], peak[2], 0.]
else:
# axis 0
if peak[0] < axis0_range[0]:
axis0_range[0] = peak[0]
elif peak[0] > axis0_range[1]:
axis0_range[1] = peak[0]
# axis 1
if peak[1] < axis1_range[0]:
axis1_range[0] = peak[1]
elif peak[1] > axis1_range[1]:
axis1_range[1] = peak[1]
# axis 2
if peak[2] < axis2_range[0]:
axis2_range[0] = peak[2]
elif peak[2] > axis2_range[1]:
axis2_range[1] = peak[2]
# END-FOR
axis0_range[2] = axis0_range[1] - axis0_range[0]
axis1_range[2] = axis1_range[1] - axis1_range[0]
axis2_range[2] = axis2_range[1] - axis2_range[0]
# edit the message to BinMD for the merged scans
binning_script = 'Peak centers are :\n'
for peak_center in scan_peak_centre_list:
binning_script += '\t%.5f, %.5f, %.5f\n' % (peak_center[0], peak_center[1], peak_center[2])
if frame == 'HKL':
# HKL space
binning_script += 'BinMD(InputWorkspace=%s, ' \
'AlignedDim0=\'H,%.5f,%.5f,100\', ' \
'AlignedDim1=\'K,%.5f,%.5f,100\', ' \
'AlignedDim2=\'L,%.5f,%.5f,100\', ' \
'OutputWorkspace=%s)' % (merged_ws_name, axis0_range[0]-1, axis0_range[1] + 1,
axis1_range[0] - 1, axis1_range[1] + 1,
axis2_range[0] - 1, axis2_range[1] + 1,
merged_ws_name + '_Histogram')
elif frame == 'QSample':
# Q-space
binning_script += 'BinMD(InputWorkspace=%s, ' \
'AlignedDim0=\'Q_sample_x,%.5f,%.5f,100\', ' \
'AlignedDim1=\'Q_sample_y,%.5f,%.5f,100\', ' \
'AlignedDim2=\'Q_sample_z,%.5f,%.5f,100\', ' \
'OutputWorkspace=%s)' % (merged_ws_name, axis0_range[0]-1, axis0_range[1] + 1,
axis1_range[0] - 1, axis1_range[1] + 1,
axis2_range[0] - 1, axis2_range[1] + 1,
merged_ws_name + '_Histogram')
# END-IF
binning_script += '\nNote: Here the resolution is 100. You may modify it and view by SliceViewer.'
binning_script += '\n\nRange: \n'
binning_script += 'Axis 0: %.5f, %.5f (%.5f)\n' % (axis0_range[0], axis0_range[1], axis0_range[2])
binning_script += 'Axis 1: %.5f, %.5f (%.5f)\n' % (axis1_range[0], axis1_range[1], axis1_range[2])
binning_script += 'Axis 2: %.5f, %.5f (%.5f)\n' % (axis2_range[0], axis2_range[1], axis2_range[2])
return binning_script
def is_calibration_match(self, exp_number, scan_number):
"""
check whether the pre-processed data has a set of matching calibrated parameters comparing to
the current one
:param exp_number:
:param scan_number:
:return:
"""
# no record is found. it should not happen!
if self._preprocessedInfoDict is None:
return False
if scan_number not in self._preprocessedInfoDict:
return False
# check others
unmatch_score = 0
# center
center_x, center_y = self.get_calibrated_det_center(exp_number)
if (center_x, center_y) != self._preprocessedInfoDict[scan_number]['Center']:
unmatch_score += 2
# wave length
wavelength = self.get_calibrated_wave_length(exp_number)
record_lambda = self._preprocessedInfoDict[scan_number]['WaveLength']
if type(record_lambda) != type(wavelength):
unmatch_score += 20
elif wavelength is not None and abs(wavelength - record_lambda) > 1.E-5:
unmatch_score += 40
# detector distance
det_sample_distance = self.get_calibrated_det_sample_distance(exp_number)
record_distance = self._preprocessedInfoDict[scan_number]['DetSampleDistance']
if type(det_sample_distance) != type(record_distance):
unmatch_score += 200
elif det_sample_distance is not None and abs(det_sample_distance - record_distance) > 1.E-5:
unmatch_score += 400
if unmatch_score > 0:
if self._debugPrintMode:
print('[INFO] Exp {0} Scan {1} has an unmatched calibrated record from pre-processed data. ID = {2}'
''.format(exp_number, scan_number, unmatch_score))
return False
if self._debugPrintMode:
print('[INFO] Exp {0} Scan {1} has a matched calibrated record from pre-processed data.'.format(exp_number, scan_number))
return True
def load_mask_file(self, mask_file_name, mask_tag):
"""
load an XML mask file to a workspace and parse it to an ROI that maps to detector pixels in 2D coordinates
:param mask_file_name:
:param mask_tag
:return: 2-tuple (lower left corner (size = 2), upper right corner (size = 2))
both of them are in order of row and column number (y and x respectively)
"""
# load mask file
assert isinstance(mask_file_name, str), 'Mask file {0} shall be a string but not a {1}.' \
''.format(mask_file_name, type(mask_file_name))
assert isinstance(mask_tag, str), 'Mask tag {0} shall be a string but not a {1}.' \
''.format(mask_tag, type(mask_tag))
if os.path.exists(mask_file_name) is False:
raise RuntimeError('Mask file {0} cannot be found.'.format(mask_file_name))
# load
mantidsimple.LoadMask(Instrument='HB3A',
InputFile=mask_file_name,
OutputWorkspace=mask_tag)
# record
self.set_roi_workspace(roi_name=mask_tag, mask_ws_name=mask_tag)
# find out the range of the ROI in (Low left, upper right) mode
roi_range = process_mask.get_region_of_interest(mask_tag)
self.set_roi(mask_tag, roi_range[0], roi_range[1])
return roi_range
def load_peak_integration_table(self, table_file_name):
"""
load peak integration table
:param table_file_name:
:return:
"""
# load to a dictionary
try:
scan_peak_dict = peak_integration_utility.read_peak_integration_table_csv(table_file_name)
self._myLoadedPeakInfoDict.update(scan_peak_dict)
except RuntimeError as run_error:
return False, 'Failed to read: {0}'.format(run_error)
return True, None
def load_preprocessed_scan(self, exp_number, scan_number, md_dir, output_ws_name):
""" load preprocessed scan from hard disk
:return: (bool, str): loaded, message
"""
# check inputs
assert isinstance(exp_number, int), 'Experiment number {0} ({1}) must be an integer' \
''.format(exp_number, type(exp_number))
assert isinstance(scan_number, int), 'Scan number {0} ({1}) must be an integer.' \
''.format(scan_number, type(scan_number))
assert isinstance(md_dir, str), 'MD file directory {0} ({1}) must be a string.' \
''.format(md_dir, type(md_dir))
assert isinstance(output_ws_name, str), 'Output workspace name {0} ({1}) must be a string.' \
''.format(output_ws_name, type(output_ws_name))
if os.path.exists(md_dir) is False:
raise RuntimeError('Pre-processed directory {0} does not exist.'.format(md_dir))
# ws_name = 'Exp{0}_Scan{1}_MD'.format(exp_number, scan_number)
# md_file_path = os.path.join(md_dir, ws_name + '.nxs')
# 2-ways to get file name
if self._preprocessedInfoDict is None or scan_number not in self._preprocessedInfoDict:
md_file_path = fourcircle_utility.pre_processed_file_name(exp_number, scan_number, md_dir)
else:
md_file_path = self._preprocessedInfoDict[scan_number]['MD']
# check
| if os.path.exists(md_file_path) is False: | 9,108 | lcc_e | python | null | 43332efaef30a408cc9cab9136931131d536dab1b4bb3735 |
|
"""Core implementation of import.
This module is NOT meant to be directly imported! It has been designed such
that it can be bootstrapped into Python as the implementation of import. As
such it requires the injection of specific modules and attributes in order to
work. One should use importlib as the public-facing version of this module.
"""
#
# IMPORTANT: Whenever making changes to this module, be sure to run
# a top-level make in order to get the frozen version of the module
# updated. Not doing so will cause the Makefile to fail for
# all others who don't have a ./python around to freeze the module
# in the early stages of compilation.
#
# See importlib._setup() for what is injected into the global namespace.
# When editing this code be aware that code executed at import time CANNOT
# reference any injected objects! This includes not only global code but also
# anything specified at the class level.
# Bootstrap-related code ######################################################
_CASE_INSENSITIVE_PLATFORMS = 'win', 'cygwin', 'darwin'
def _make_relax_case():
if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS):
def _relax_case():
"""True if filenames must be checked case-insensitively."""
return b'PYTHONCASEOK' in _os.environ
else:
def _relax_case():
"""True if filenames must be checked case-insensitively."""
return False
return _relax_case
def _w_long(x):
"""Convert a 32-bit integer to little-endian."""
return (int(x) & 0xFFFFFFFF).to_bytes(4, 'little')
def _r_long(int_bytes):
"""Convert 4 bytes in little-endian to an integer."""
return int.from_bytes(int_bytes, 'little')
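# Example round trip (editorial): _w_long(258) == b'\x02\x01\x00\x00' and
# _r_long(b'\x02\x01\x00\x00') == 258, i.e. the 32-bit little-endian encoding used for
# the .pyc header fields written and read further below.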
def _path_join(*path_parts):
"""Replacement for os.path.join()."""
return path_sep.join([part.rstrip(path_separators)
for part in path_parts if part])
def _path_split(path):
"""Replacement for os.path.split()."""
if len(path_separators) == 1:
front, _, tail = path.rpartition(path_sep)
return front, tail
for x in reversed(path):
if x in path_separators:
front, tail = path.rsplit(x, maxsplit=1)
return front, tail
return '', path
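# Example (editorial, assuming the '/' separator injected by _setup() on POSIX):
# _path_join('/usr/lib/', 'python3.4', 'os.py') -> '/usr/lib/python3.4/os.py'
# _path_split('/usr/lib/python3.4/os.py') -> ('/usr/lib/python3.4', 'os.py')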
def _path_stat(path):
"""Stat the path.
Made a separate function to make it easier to override in experiments
(e.g. cache stat results).
"""
return _os.stat(path)
def _path_is_mode_type(path, mode):
"""Test whether the path is the specified mode type."""
try:
stat_info = _path_stat(path)
except OSError:
return False
return (stat_info.st_mode & 0o170000) == mode
def _path_isfile(path):
"""Replacement for os.path.isfile."""
return _path_is_mode_type(path, 0o100000)
def _path_isdir(path):
"""Replacement for os.path.isdir."""
if not path:
path = _os.getcwd()
return _path_is_mode_type(path, 0o040000)
def _write_atomic(path, data, mode=0o666):
"""Best-effort function to write data to a path atomically.
Be prepared to handle a FileExistsError if concurrent writing of the
temporary file is attempted."""
# id() is used to generate a pseudo-random filename.
path_tmp = '{}.{}'.format(path, id(path))
fd = _os.open(path_tmp,
_os.O_EXCL | _os.O_CREAT | _os.O_WRONLY, mode & 0o666)
try:
# We first write data to a temporary file, and then use os.replace() to
# perform an atomic rename.
with _io.FileIO(fd, 'wb') as file:
file.write(data)
_os.replace(path_tmp, path)
except OSError:
try:
_os.unlink(path_tmp)
except OSError:
pass
raise
def _wrap(new, old):
"""Simple substitute for functools.update_wrapper."""
for replace in ['__module__', '__name__', '__qualname__', '__doc__']:
if hasattr(old, replace):
setattr(new, replace, getattr(old, replace))
new.__dict__.update(old.__dict__)
def _new_module(name):
return type(sys)(name)
_code_type = type(_wrap.__code__)
class _ManageReload:
"""Manages the possible clean-up of sys.modules for load_module()."""
def __init__(self, name):
self._name = name
def __enter__(self):
self._is_reload = self._name in sys.modules
def __exit__(self, *args):
if any(arg is not None for arg in args) and not self._is_reload:
try:
del sys.modules[self._name]
except KeyError:
pass
# Module-level locking ########################################################
# A dict mapping module names to weakrefs of _ModuleLock instances
_module_locks = {}
# A dict mapping thread ids to _ModuleLock instances
_blocking_on = {}
class _DeadlockError(RuntimeError):
pass
class _ModuleLock:
"""A recursive lock implementation which is able to detect deadlocks
(e.g. thread 1 trying to take locks A then B, and thread 2 trying to
take locks B then A).
"""
def __init__(self, name):
self.lock = _thread.allocate_lock()
self.wakeup = _thread.allocate_lock()
self.name = name
self.owner = None
self.count = 0
self.waiters = 0
def has_deadlock(self):
# Deadlock avoidance for concurrent circular imports.
me = _thread.get_ident()
tid = self.owner
while True:
lock = _blocking_on.get(tid)
if lock is None:
return False
tid = lock.owner
if tid == me:
return True
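# Example of the situation detected above (editorial comment): thread 1 holds the lock for
# module A and blocks acquiring B's lock while thread 2 holds B's lock and blocks acquiring
# A's lock; following the owner chain through _blocking_on eventually leads back to the
# calling thread, so has_deadlock() returns True.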
def acquire(self):
"""
Acquire the module lock. If a potential deadlock is detected,
a _DeadlockError is raised.
Otherwise, the lock is always acquired and True is returned.
"""
tid = _thread.get_ident()
_blocking_on[tid] = self
try:
while True:
with self.lock:
if self.count == 0 or self.owner == tid:
self.owner = tid
self.count += 1
return True
if self.has_deadlock():
raise _DeadlockError('deadlock detected by %r' % self)
if self.wakeup.acquire(False):
self.waiters += 1
# Wait for a release() call
self.wakeup.acquire()
self.wakeup.release()
finally:
del _blocking_on[tid]
def release(self):
tid = _thread.get_ident()
with self.lock:
if self.owner != tid:
raise RuntimeError('cannot release un-acquired lock')
assert self.count > 0
self.count -= 1
if self.count == 0:
self.owner = None
if self.waiters:
self.waiters -= 1
self.wakeup.release()
def __repr__(self):
return '_ModuleLock({!r}) at {}'.format(self.name, id(self))
class _DummyModuleLock:
"""A simple _ModuleLock equivalent for Python builds without
multi-threading support."""
def __init__(self, name):
self.name = name
self.count = 0
def acquire(self):
self.count += 1
return True
def release(self):
if self.count == 0:
raise RuntimeError('cannot release un-acquired lock')
self.count -= 1
def __repr__(self):
return '_DummyModuleLock({!r}) at {}'.format(self.name, id(self))
class _ModuleLockManager:
def __init__(self, name):
self._name = name
self._lock = None
def __enter__(self):
try:
self._lock = _get_module_lock(self._name)
finally:
_imp.release_lock()
self._lock.acquire()
def __exit__(self, *args, **kwargs):
self._lock.release()
# The following two functions are for consumption by Python/import.c.
def _get_module_lock(name):
"""Get or create the module lock for a given module name.
Should only be called with the import lock taken."""
lock = None
try:
lock = _module_locks[name]()
except KeyError:
pass
if lock is None:
if _thread is None:
lock = _DummyModuleLock(name)
else:
lock = _ModuleLock(name)
def cb(_):
del _module_locks[name]
_module_locks[name] = _weakref.ref(lock, cb)
return lock
def _lock_unlock_module(name):
"""Release the global import lock, and acquires then release the
module lock for a given module name.
This is used to ensure a module is completely initialized, in the
event it is being imported by another thread.
Should only be called with the import lock taken."""
lock = _get_module_lock(name)
_imp.release_lock()
try:
lock.acquire()
except _DeadlockError:
# Concurrent circular import, we'll accept a partially initialized
# module object.
pass
else:
lock.release()
# Frame stripping magic ###############################################
def _call_with_frames_removed(f, *args, **kwds):
"""remove_importlib_frames in import.c will always remove sequences
of importlib frames that end with a call to this function
Use it instead of a normal call in places where including the importlib
frames introduces unwanted noise into the traceback (e.g. when executing
module code)
"""
return f(*args, **kwds)
# Finder/loader utility code ###############################################
# Magic word to reject .pyc files generated by other Python versions.
# It should change for each incompatible change to the bytecode.
#
# The value of CR and LF is incorporated so if you ever read or write
# a .pyc file in text mode the magic number will be wrong; also, the
# Apple MPW compiler swaps their values, botching string constants.
#
# The magic numbers must be spaced apart at least 2 values, as the
# -U interpreter flag will cause MAGIC+1 to be used. They have been
# odd numbers for some time now.
#
# There were a variety of old schemes for setting the magic number.
# The current working scheme is to increment the previous value by
# 10.
#
# Starting with the adoption of PEP 3147 in Python 3.2, every bump in magic
# number also includes a new "magic tag", i.e. a human readable string used
# to represent the magic number in __pycache__ directories. When you change
# the magic number, you must also set a new unique magic tag. Generally this
# can be named after the Python major version of the magic number bump, but
# it can really be anything, as long as it's different than anything else
# that's come before. The tags are included in the following table, starting
# with Python 3.2a0.
#
# Known values:
# Python 1.5: 20121
# Python 1.5.1: 20121
# Python 1.5.2: 20121
# Python 1.6: 50428
# Python 2.0: 50823
# Python 2.0.1: 50823
# Python 2.1: 60202
# Python 2.1.1: 60202
# Python 2.1.2: 60202
# Python 2.2: 60717
# Python 2.3a0: 62011
# Python 2.3a0: 62021
# Python 2.3a0: 62011 (!)
# Python 2.4a0: 62041
# Python 2.4a3: 62051
# Python 2.4b1: 62061
# Python 2.5a0: 62071
# Python 2.5a0: 62081 (ast-branch)
# Python 2.5a0: 62091 (with)
# Python 2.5a0: 62092 (changed WITH_CLEANUP opcode)
# Python 2.5b3: 62101 (fix wrong code: for x, in ...)
# Python 2.5b3: 62111 (fix wrong code: x += yield)
# Python 2.5c1: 62121 (fix wrong lnotab with for loops and
# storing constants that should have been removed)
# Python 2.5c2: 62131 (fix wrong code: for x, in ... in listcomp/genexp)
# Python 2.6a0: 62151 (peephole optimizations and STORE_MAP opcode)
# Python 2.6a1: 62161 (WITH_CLEANUP optimization)
# Python 2.7a0: 62171 (optimize list comprehensions/change LIST_APPEND)
# Python 2.7a0: 62181 (optimize conditional branches:
# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE)
# Python 2.7a0 62191 (introduce SETUP_WITH)
# Python 2.7a0 62201 (introduce BUILD_SET)
# Python 2.7a0 62211 (introduce MAP_ADD and SET_ADD)
# Python 3000: 3000
# 3010 (removed UNARY_CONVERT)
# 3020 (added BUILD_SET)
# 3030 (added keyword-only parameters)
# 3040 (added signature annotations)
# 3050 (print becomes a function)
# 3060 (PEP 3115 metaclass syntax)
# 3061 (string literals become unicode)
# 3071 (PEP 3109 raise changes)
# 3081 (PEP 3137 make __file__ and __name__ unicode)
# 3091 (kill str8 interning)
# 3101 (merge from 2.6a0, see 62151)
# 3103 (__file__ points to source file)
# Python 3.0a4: 3111 (WITH_CLEANUP optimization).
# Python 3.0a5: 3131 (lexical exception stacking, including POP_EXCEPT)
# Python 3.1a0: 3141 (optimize list, set and dict comprehensions:
# change LIST_APPEND and SET_ADD, add MAP_ADD)
# Python 3.1a0: 3151 (optimize conditional branches:
# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE)
# Python 3.2a0: 3160 (add SETUP_WITH)
# tag: cpython-32
# Python 3.2a1: 3170 (add DUP_TOP_TWO, remove DUP_TOPX and ROT_FOUR)
# tag: cpython-32
# Python 3.2a2 3180 (add DELETE_DEREF)
# Python 3.3a0 3190 __class__ super closure changed
# Python 3.3a0 3200 (__qualname__ added)
# 3210 (added size modulo 2**32 to the pyc header)
# Python 3.3a1 3220 (changed PEP 380 implementation)
# Python 3.3a4 3230 (revert changes to implicit __class__ closure)
# Python 3.4a1 3250 (evaluate positional default arguments before
# keyword-only defaults)
# Python 3.4a1 3260 (add LOAD_CLASSDEREF; allow locals of class to override
# free vars)
# Python 3.4a1 3270 (various tweaks to the __class__ closure)
# Python 3.4a1 3280 (remove implicit class argument)
# Python 3.4a4 3290 (changes to __qualname__ computation)
# Python 3.4a4 3300 (more changes to __qualname__ computation)
# Python 3.4rc2 3310 (alter __qualname__ computation)
#
# MAGIC must change whenever the bytecode emitted by the compiler may no
# longer be understood by older implementations of the eval loop (usually
# due to the addition of new opcodes).
MAGIC_NUMBER = (3310).to_bytes(2, 'little') + b'\r\n'
_RAW_MAGIC_NUMBER = int.from_bytes(MAGIC_NUMBER, 'little') # For import.c
_PYCACHE = '__pycache__'
SOURCE_SUFFIXES = ['.py'] # _setup() adds .pyw as needed.
DEBUG_BYTECODE_SUFFIXES = ['.pyc']
OPTIMIZED_BYTECODE_SUFFIXES = ['.pyo']
def cache_from_source(path, debug_override=None):
"""Given the path to a .py file, return the path to its .pyc/.pyo file.
The .py file does not need to exist; this simply returns the path to the
.pyc/.pyo file calculated as if the .py file were imported. The extension
will be .pyc unless sys.flags.optimize is non-zero, then it will be .pyo.
If debug_override is not None, then it must be a boolean and is used in
place of sys.flags.optimize.
If sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
debug = not sys.flags.optimize if debug_override is None else debug_override
if debug:
suffixes = DEBUG_BYTECODE_SUFFIXES
else:
suffixes = OPTIMIZED_BYTECODE_SUFFIXES
head, tail = _path_split(path)
base, sep, rest = tail.rpartition('.')
tag = sys.implementation.cache_tag
if tag is None:
raise NotImplementedError('sys.implementation.cache_tag is None')
filename = ''.join([(base if base else rest), sep, tag, suffixes[0]])
return _path_join(head, _PYCACHE, filename)
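# Illustrative example (assumes sys.implementation.cache_tag == 'cpython-34'
# and the default suffix lists above):
#
#   cache_from_source('/opt/app/spam.py')
#   # -> '/opt/app/__pycache__/spam.cpython-34.pyc'  (or .pyo when optimizing)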
def source_from_cache(path):
"""Given the path to a .pyc./.pyo file, return the path to its .py file.
The .pyc/.pyo file does not need to exist; this simply returns the path to
the .py file calculated to correspond to the .pyc/.pyo file. If path does
not conform to PEP 3147 format, ValueError will be raised. If
sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
if sys.implementation.cache_tag is None:
raise NotImplementedError('sys.implementation.cache_tag is None')
head, pycache_filename = _path_split(path)
head, pycache = _path_split(head)
if pycache != _PYCACHE:
raise ValueError('{} not bottom-level directory in '
'{!r}'.format(_PYCACHE, path))
if pycache_filename.count('.') != 2:
raise ValueError('expected only 2 dots in '
'{!r}'.format(pycache_filename))
base_filename = pycache_filename.partition('.')[0]
return _path_join(head, base_filename + SOURCE_SUFFIXES[0])
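# Illustrative inverse of cache_from_source() (same assumptions as above):
#
#   source_from_cache('/opt/app/__pycache__/spam.cpython-34.pyc')
#   # -> '/opt/app/spam.py'
#
# A path whose final directory is not __pycache__, or whose filename does not
# contain exactly two dots, raises ValueError.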
def _get_sourcefile(bytecode_path):
"""Convert a bytecode file path to a source path (if possible).
This function exists purely for backwards-compatibility for
PyImport_ExecCodeModuleWithFilenames() in the C API.
"""
if len(bytecode_path) == 0:
return None
rest, _, extension = bytecode_path.rpartition('.')
if not rest or extension.lower()[-3:-1] != 'py':
return bytecode_path
try:
source_path = source_from_cache(bytecode_path)
except (NotImplementedError, ValueError):
source_path = bytecode_path[:-1]
return source_path if _path_isfile(source_path) else bytecode_path
def _calc_mode(path):
"""Calculate the mode permissions for a bytecode file."""
try:
mode = _path_stat(path).st_mode
except OSError:
mode = 0o666
# We always ensure write access so we can update cached files
# later even when the source files are read-only on Windows (#6074)
mode |= 0o200
return mode
def _verbose_message(message, *args, verbosity=1):
"""Print the message to stderr if -v/PYTHONVERBOSE is turned on."""
if sys.flags.verbose >= verbosity:
if not message.startswith(('#', 'import ')):
message = '# ' + message
print(message.format(*args), file=sys.stderr)
def _check_name(method):
"""Decorator to verify that the module being requested matches the one the
loader can handle.
    The first argument (self) must define a 'name' attribute which the second
    argument is compared against. If the comparison fails then ImportError is
    raised.
"""
def _check_name_wrapper(self, name=None, *args, **kwargs):
if name is None:
name = self.name
elif self.name != name:
raise ImportError('loader cannot handle %s' % name, name=name)
return method(self, name, *args, **kwargs)
_wrap(_check_name_wrapper, method)
return _check_name_wrapper
def _requires_builtin(fxn):
"""Decorator to verify the named module is built-in."""
def _requires_builtin_wrapper(self, fullname):
if fullname not in sys.builtin_module_names:
raise ImportError('{!r} is not a built-in module'.format(fullname),
name=fullname)
return fxn(self, fullname)
_wrap(_requires_builtin_wrapper, fxn)
return _requires_builtin_wrapper
def _requires_frozen(fxn):
"""Decorator to verify the named module is frozen."""
def _requires_frozen_wrapper(self, fullname):
if not _imp.is_frozen(fullname):
raise ImportError('{!r} is not a frozen module'.format(fullname),
name=fullname)
return fxn(self, fullname)
_wrap(_requires_frozen_wrapper, fxn)
return _requires_frozen_wrapper
def _find_module_shim(self, fullname):
"""Try to find a loader for the specified module by delegating to
self.find_loader().
This method is deprecated in favor of finder.find_spec().
"""
# Call find_loader(). If it returns a string (indicating this
# is a namespace package portion), generate a warning and
# return None.
loader, portions = self.find_loader(fullname)
if loader is None and len(portions):
msg = 'Not importing directory {}: missing __init__'
_warnings.warn(msg.format(portions[0]), ImportWarning)
return loader
def _load_module_shim(self, fullname):
"""Load the specified module into sys.modules and return it.
This method is deprecated. Use loader.exec_module instead.
"""
spec = spec_from_loader(fullname, self)
methods = _SpecMethods(spec)
if fullname in sys.modules:
module = sys.modules[fullname]
methods.exec(module)
return sys.modules[fullname]
else:
return methods.load()
def _validate_bytecode_header(data, source_stats=None, name=None, path=None):
"""Validate the header of the passed-in bytecode against source_stats (if
    given) and return the bytecode that can be compiled by compile().
All other arguments are used to enhance error reporting.
ImportError is raised when the magic number is incorrect or the bytecode is
found to be stale. EOFError is raised when the data is found to be
truncated.
"""
exc_details = {}
if name is not None:
exc_details['name'] = name
else:
# To prevent having to make all messages have a conditional name.
name = '<bytecode>'
if path is not None:
exc_details['path'] = path
magic = data[:4]
raw_timestamp = data[4:8]
raw_size = data[8:12]
if magic != MAGIC_NUMBER:
message = 'bad magic number in {!r}: {!r}'.format(name, magic)
_verbose_message(message)
raise ImportError(message, **exc_details)
elif len(raw_timestamp) != 4:
message = 'reached EOF while reading timestamp in {!r}'.format(name)
_verbose_message(message)
raise EOFError(message)
elif len(raw_size) != 4:
message = 'reached EOF while reading size of source in {!r}'.format(name)
_verbose_message(message)
raise EOFError(message)
if source_stats is not None:
try:
source_mtime = int(source_stats['mtime'])
except KeyError:
pass
else:
if _r_long(raw_timestamp) != source_mtime:
message = 'bytecode is stale for {!r}'.format(name)
_verbose_message(message)
raise ImportError(message, **exc_details)
try:
source_size = source_stats['size'] & 0xFFFFFFFF
except KeyError:
pass
else:
if _r_long(raw_size) != source_size:
raise ImportError('bytecode is stale for {!r}'.format(name),
**exc_details)
return data[12:]
def _compile_bytecode(data, name=None, bytecode_path=None, source_path=None):
"""Compile bytecode as returned by _validate_bytecode_header()."""
code = marshal.loads(data)
if isinstance(code, _code_type):
_verbose_message('code object from {!r}', bytecode_path)
if source_path is not None:
_imp._fix_co_filename(code, source_path)
return code
else:
raise ImportError('Non-code object in {!r}'.format(bytecode_path),
name=name, path=bytecode_path)
def _code_to_bytecode(code, mtime=0, source_size=0):
"""Compile a code object into bytecode for writing out to a byte-compiled
file."""
data = bytearray(MAGIC_NUMBER)
data.extend(_w_long(mtime))
data.extend(_w_long(source_size))
data.extend(marshal.dumps(code))
return data
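# Sketch of the byte layout produced above (and checked again by
# _validate_bytecode_header()), assuming _w_long() writes 4 little-endian
# bytes as elsewhere in this module:
#
#   bytes  0..3   MAGIC_NUMBER (2-byte magic followed by b'\r\n')
#   bytes  4..7   source mtime, little-endian 32-bit
#   bytes  8..11  source size modulo 2**32, little-endian 32-bit
#   bytes 12..    marshal.dumps(code)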
def decode_source(source_bytes):
"""Decode bytes representing source code and return the string.
Universal newline support is used in the decoding.
"""
import tokenize # To avoid bootstrap issues.
source_bytes_readline = _io.BytesIO(source_bytes).readline
encoding = tokenize.detect_encoding(source_bytes_readline)
newline_decoder = _io.IncrementalNewlineDecoder(None, True)
return newline_decoder.decode(source_bytes.decode(encoding[0]))
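# Minimal illustration with a hypothetical byte string: the declared source
# encoding is honoured and newlines are normalised to '\n'.
#
#   decode_source(b'# -*- coding: latin-1 -*-\r\nx = 1\r\n')
#   # -> '# -*- coding: latin-1 -*-\nx = 1\n'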
# Module specifications #######################################################
def _module_repr(module):
# The implementation of ModuleType__repr__().
loader = getattr(module, '__loader__', None)
if hasattr(loader, 'module_repr'):
# As soon as BuiltinImporter, FrozenImporter, and NamespaceLoader
        # drop their implementations for module_repr(), we can add a
# deprecation warning here.
try:
return loader.module_repr(module)
except Exception:
pass
try:
spec = module.__spec__
except AttributeError:
pass
else:
if spec is not None:
return _SpecMethods(spec).module_repr()
# We could use module.__class__.__name__ instead of 'module' in the
# various repr permutations.
try:
name = module.__name__
except AttributeError:
name = '?'
try:
filename = module.__file__
except AttributeError:
if loader is None:
return '<module {!r}>'.format(name)
else:
return '<module {!r} ({!r})>'.format(name, loader)
else:
return '<module {!r} from {!r}>'.format(name, filename)
class _installed_safely:
def __init__(self, module):
self._module = module
self._spec = module.__spec__
def __enter__(self):
# This must be done before putting the module in sys.modules
# (otherwise an optimization shortcut in import.c becomes
# wrong)
self._spec._initializing = True
sys.modules[self._spec.name] = self._module
def __exit__(self, *args):
try:
spec = self._spec
if any(arg is not None for arg in args):
try:
del sys.modules[spec.name]
except KeyError:
pass
else:
_verbose_message('import {!r} # {!r}', spec.name, spec.loader)
finally:
self._spec._initializing = False
class ModuleSpec:
"""The specification for a module, used for loading.
A module's spec is the source for information about the module. For
data associated with the module, including source, use the spec's
loader.
`name` is the absolute name of the module. `loader` is the loader
to use when loading the module. `parent` is the name of the
package the module is in. The parent is derived from the name.
`is_package` determines if the module is considered a package or
not. On modules this is reflected by the `__path__` attribute.
`origin` is the specific location used by the loader from which to
load the module, if that information is available. When filename is
set, origin will match.
    `has_location` indicates that a spec's "origin" reflects a location.
    When this is True, the module's `__file__` attribute is set.
`cached` is the location of the cached bytecode file, if any. It
corresponds to the `__cached__` attribute.
`submodule_search_locations` is the sequence of path entries to
search when importing submodules. If set, is_package should be
True--and False otherwise.
Packages are simply modules that (may) have submodules. If a spec
has a non-None value in `submodule_search_locations`, the import
system will consider modules loaded from the spec as packages.
Only finders (see importlib.abc.MetaPathFinder and
importlib.abc.PathEntryFinder) should modify ModuleSpec instances.
"""
def __init__(self, name, loader, *, origin=None, loader_state=None,
is_package=None):
self.name = name
self.loader = loader
self.origin = origin
self.loader_state = loader_state
self.submodule_search_locations = [] if is_package else None
# file-location attributes
self._set_fileattr = False
self._cached = None
def __repr__(self):
args = ['name={!r}'.format(self.name),
'loader={!r}'.format(self.loader)]
if self.origin is not None:
args.append('origin={!r}'.format(self.origin))
if self.submodule_search_locations is not None:
args.append('submodule_search_locations={}'
.format(self.submodule_search_locations))
return '{}({})'.format(self.__class__.__name__, ', '.join(args))
def __eq__(self, other):
smsl = self.submodule_search_locations
try:
return (self.name == other.name and
self.loader == other.loader and
self.origin == other.origin and
smsl == other.submodule_search_locations and
self.cached == other.cached and
self.has_location == other.has_location)
except AttributeError:
return False
@property
def cached(self):
if self._cached is None:
if self.origin is not None and self._set_fileattr:
filename = self.origin
if filename.endswith(tuple(SOURCE_SUFFIXES)):
try:
self._cached = cache_from_source(filename)
except NotImplementedError:
pass
elif filename.endswith(tuple(BYTECODE_SUFFIXES)):
self._cached = filename
return self._cached
@cached.setter
def cached(self, cached):
self._cached = cached
@property
def parent(self):
"""The name of the module's parent."""
if self.submodule_search_locations is None:
return self.name.rpartition('.')[0]
else:
return self.name
@property
def has_location(self):
return self._set_fileattr
@has_location.setter
def has_location(self, value):
self._set_fileattr = bool(value)
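# Illustrative behaviour of the properties above (not executed here):
#
#   ModuleSpec('pkg.mod', loader=None).parent                 # -> 'pkg'
#   ModuleSpec('pkg', loader=None, is_package=True).parent    # -> 'pkg'
#
# i.e. a package is its own parent, while an ordinary module's parent is its
# name minus the last dotted component.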
def spec_from_loader(name, loader, *, origin=None, is_package=None):
"""Return a module spec based on various loader methods."""
if hasattr(loader, 'get_filename'):
if is_package is None:
return spec_from_file_location(name, loader=loader)
search = [] if is_package else None
return spec_from_file_location(name, loader=loader,
submodule_search_locations=search)
if is_package is None:
if hasattr(loader, 'is_package'):
try:
is_package = loader.is_package(name)
except ImportError:
is_package = None # aka, undefined
else:
# the default
is_package = False
return ModuleSpec(name, loader, origin=origin, is_package=is_package)
_POPULATE = object()
def spec_from_file_location(name, location=None, *, loader=None,
submodule_search_locations=_POPULATE):
"""Return a module spec based on a file location.
To indicate that the module is a package, set
submodule_search_locations to a list of directory paths. An
    empty list is sufficient, though it's not otherwise useful to the
import system.
The loader must take a spec as its only __init__() arg.
"""
if location is None:
# The caller may simply want a partially populated location-
# oriented spec. So we set the location to a bogus value and
# fill in as much as we can.
location = '<unknown>'
if hasattr(loader, 'get_filename'):
# ExecutionLoader
try:
location = loader.get_filename(name)
except ImportError:
pass
# If the location is on the filesystem, but doesn't actually exist,
# we could return None here, indicating that the location is not
# valid. However, we don't have a good way of testing since an
# indirect location (e.g. a zip file or URL) will look like a
# non-existent file relative to the filesystem.
spec = ModuleSpec(name, loader, origin=location)
spec._set_fileattr = True
# Pick a loader if one wasn't provided.
if loader is None:
for loader_class, suffixes in _get_supported_file_loaders():
if location.endswith(tuple(suffixes)):
loader = loader_class(name, location)
spec.loader = loader
break
else:
return None
# Set submodule_search_paths appropriately.
if submodule_search_locations is _POPULATE:
# Check the loader.
if hasattr(loader, 'is_package'):
try:
is_package = loader.is_package(name)
except ImportError:
pass
else:
if is_package:
spec.submodule_search_locations = []
else:
spec.submodule_search_locations = submodule_search_locations
if spec.submodule_search_locations == []:
if location:
dirname = _path_split(location)[0]
spec.submodule_search_locations.append(dirname)
return spec
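# Illustrative usage with a hypothetical path (assumes '.py' is among the
# registered source suffixes, so a SourceFileLoader is picked automatically):
#
#   spec = spec_from_file_location('spam', '/opt/app/spam.py')
#   spec.origin                        # -> '/opt/app/spam.py'
#   spec.has_location                  # -> True
#   spec.submodule_search_locations    # -> None (not a package)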
def _spec_from_module(module, loader=None, origin=None):
# This function is meant for use in _setup().
try:
spec = module.__spec__
except AttributeError:
pass
else:
if spec is not None:
return spec
name = module.__name__
if loader is None:
try:
loader = module.__loader__
except AttributeError:
# loader will stay None.
pass
try:
location = module.__file__
except AttributeError:
location = None
if origin is None:
if location is None:
try:
origin = loader._ORIGIN
except AttributeError:
origin = None
else:
origin = location
try:
cached = module.__cached__
except AttributeError:
cached = None
try:
submodule_search_locations = list(module.__path__)
except AttributeError:
submodule_search_locations = None
spec = ModuleSpec(name, loader, origin=origin)
spec._set_fileattr = False if location is None else True
spec.cached = cached
spec.submodule_search_locations = submodule_search_locations
return spec
class _SpecMethods:
"""Convenience wrapper around spec objects to provide spec-specific
methods."""
# The various spec_from_* functions could be made factory methods here.
def __init__(self, spec):
self.spec = spec
def module_repr(self):
"""Return the repr to use for the module."""
# We mostly replicate _module_repr() using the spec attributes.
spec = self.spec
name = '?' if spec.name is None else spec.name
if spec.origin is None:
if spec.loader is None:
return '<module {!r}>'.format(name)
else:
return '<module {!r} ({!r})>'.format(name, spec.loader)
else:
if spec.has_location:
return '<module {!r} from {!r}>'.format(name, spec.origin)
else:
return '<module {!r} ({})>'.format(spec.name, spec.origin)
def init_module_attrs(self, module, *, _override=False, _force_name=True):
"""Set the module's attributes.
All missing import-related module attributes will be set. Here
is how the spec attributes map onto the module:
spec.name -> module.__name__
spec.loader -> module.__loader__
spec.parent -> module.__package__
spec -> module.__spec__
Optional:
        spec.origin -> module.__file__ (if spec.has_location is true)
spec.cached -> module.__cached__ (if __file__ also set)
spec.submodule_search_locations -> module.__path__ (if set)
"""
spec = self.spec
        # The passed-in module may not support attribute assignment,
# in which case we simply don't set the attributes.
# __name__
if (_override or _force_name or
getattr(module, '__name__', None) is None):
try:
module.__name__ = spec.name
except AttributeError:
pass
# __loader__
if _override or getattr(module, '__loader__', None) is None:
loader = spec.loader
if loader is None:
# A backward compatibility hack.
if spec.submodule_search_locations is not None:
loader = _NamespaceLoader.__new__(_NamespaceLoader)
loader._path = spec.submodule_search_locations
try:
module.__loader__ = loader
except AttributeError:
pass
# __package__
if _override or getattr(module, '__package__', None) is None:
try:
module.__package__ = spec.parent
except AttributeError:
pass
# __spec__
try:
module.__spec__ = spec
except AttributeError:
pass
# __path__
if _override or getattr(module, '__path__', None) is None:
if spec.submodule_search_locations is not None:
try:
module.__path__ = spec.submodule_search_locations
except AttributeError:
pass
if spec.has_location:
# __file__
if _override or getattr(module, '__file__', None) is None:
try:
module.__file__ = spec.origin
except AttributeError:
pass
# __cached__
if _override or getattr(module, '__cached__', None) is None:
if spec.cached is not None:
try:
module.__cached__ = spec.cached
except AttributeError:
pass
def create(self):
"""Return a new module to be loaded.
The import-related module attributes are also set with the
appropriate values from the spec.
"""
spec = self.spec
# Typically loaders will not implement create_module().
if hasattr(spec.loader, 'create_module'):
# If create_module() returns `None` it means the default
# module creation should be used.
module = spec.loader.create_module(spec)
else:
module = None
if module is None:
# This must be done before open() is ever called as the 'io'
# module implicitly imports 'locale' and would otherwise
# trigger an infinite loop.
module = _new_module(spec.name)
self.init_module_attrs(module)
return module
def _exec(self, module):
"""Do everything necessary to execute the module.
The namespace of `module` is used as the target of execution.
This method uses the loader's `exec_module()` method.
"""
self.spec.loader.exec_module(module)
# Used by importlib.reload() and _load_module_shim().
def exec(self, module):
"""Execute the spec in an existing module's namespace."""
name = self.spec.name
_imp.acquire_lock()
with _ModuleLockManager(name):
if sys.modules.get(name) is not module:
msg = 'module {!r} not in sys.modules'.format(name)
raise ImportError(msg, name=name)
if self.spec.loader is None:
if self.spec.submodule_search_locations is None:
raise ImportError('missing loader', name=self.spec.name)
# namespace package
self.init_module_attrs(module, _override=True)
return module
self.init_module_attrs(module, _override=True)
if not hasattr(self.spec.loader, 'exec_module'):
# (issue19713) Once BuiltinImporter and ExtensionFileLoader
# have exec_module() implemented, we can add a deprecation
# warning here.
self.spec.loader.load_module(name)
else:
self._exec(module)
return sys.modules[name]
def _load_backward_compatible(self):
# (issue19713) Once BuiltinImporter and ExtensionFileLoader
# have exec_module() implemented, we can add a deprecation
# warning here.
spec = self.spec
spec.loader.load_module(spec.name)
# The module must be in sys.modules at this point!
module = sys.modules[spec.name]
if getattr(module, '__loader__', None) is None:
try:
module.__loader__ = spec.loader
except AttributeError:
pass
if getattr(module, '__package__', None) is None:
try:
# Since module.__path__ may not line up with
                # spec.submodule_search_locations, we can't necessarily rely
# on spec.parent here.
module.__package__ = module.__name__
if not hasattr(module, '__path__'):
module.__package__ = spec.name.rpartition('.')[0]
except AttributeError:
pass
if getattr(module, '__spec__', None) is None:
try:
module.__spec__ = spec
except AttributeError:
pass
return module
def _load_unlocked(self):
# A helper for direct use by the import system.
if self.spec.loader is not None:
# not a namespace package
if not hasattr(self.spec.loader, 'exec_module'):
return self._load_backward_compatible()
module = self.create()
with _installed_safely(module):
if self.spec.loader is None:
if self.spec.submodule_search_locations is None:
raise ImportError('missing loader', name=self.spec.name)
# A namespace package so do nothing.
else:
self._exec(module)
# We don't ensure that the import-related module attributes get
# set in the sys.modules replacement case. Such modules are on
# their own.
return sys.modules[self.spec.name]
# A method used during testing of _load_unlocked() and by
# _load_module_shim().
def load(self):
"""Return a new module object, loaded by the spec's loader.
The module is not added to its parent.
If a module is already in sys.modules, that existing module gets
clobbered.
"""
_imp.acquire_lock()
with _ModuleLockManager(self.spec.name):
return self._load_unlocked()
def _fix_up_module(ns, name, pathname, cpathname=None):
# This function is used by PyImport_ExecCodeModuleObject().
loader = ns.get('__loader__')
spec = ns.get('__spec__')
if not loader:
if spec:
loader = spec.loader
elif pathname == cpathname:
loader = SourcelessFileLoader(name, pathname)
else:
loader = SourceFileLoader(name, pathname)
if not spec:
spec = spec_from_file_location(name, pathname, loader=loader)
try:
ns['__spec__'] = spec
ns['__loader__'] = loader
ns['__file__'] = pathname
ns['__cached__'] = cpathname
except Exception:
# Not important enough to report.
pass
# Loaders #####################################################################
class BuiltinImporter:
"""Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@staticmethod
def module_repr(module):
"""Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
return '<module {!r} (built-in)>'.format(module.__name__)
@classmethod
def find_spec(cls, fullname, path=None, target=None):
if path is not None:
return None
if _imp.is_builtin(fullname):
return spec_from_loader(fullname, cls, origin='built-in')
else:
return None
@classmethod
def find_module(cls, fullname, path=None):
"""Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
This method is deprecated. Use find_spec() instead.
"""
spec = cls.find_spec(fullname, path)
return spec.loader if spec is not None else None
@classmethod
@_requires_builtin
def load_module(cls, fullname):
"""Load a built-in module."""
# Once an exec_module() implementation is added we can also
# add a deprecation warning here.
with _ManageReload(fullname):
module = _call_with_frames_removed(_imp.init_builtin, fullname)
module.__loader__ = cls
module.__package__ = ''
return module
@classmethod
@_requires_builtin
def get_code(cls, fullname):
"""Return None as built-in modules do not have code objects."""
return None
@classmethod
@_requires_builtin
def get_source(cls, fullname):
"""Return None as built-in modules do not have source code."""
return None
@classmethod
@_requires_builtin
def is_package(cls, fullname):
"""Return False as built-in modules are never packages."""
return False
class FrozenImporter:
"""Meta path import for frozen modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@staticmethod
def module_repr(m):
"""Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
return '<module {!r} (frozen)>'.format(m.__name__)
@classmethod
def find_spec(cls, fullname, path=None, target=None):
if _imp.is_frozen(fullname):
return spec_from_loader(fullname, cls, origin='frozen')
else:
return None
@classmethod
def find_module(cls, fullname, path=None):
"""Find a frozen module.
This method is deprecated. Use find_spec() instead.
"""
return cls if _imp.is_frozen(fullname) else None
@staticmethod
def exec_module(module):
name = module.__spec__.name
if not _imp.is_frozen(name):
raise ImportError('{!r} is not a frozen module'.format(name),
name=name)
code = _call_with_frames_removed(_imp.get_frozen_object, name)
exec(code, module.__dict__)
@classmethod
def load_module(cls, fullname):
"""Load a frozen module.
This method is deprecated. Use exec_module() instead.
"""
return _load_module_shim(cls, fullname)
@classmethod
@_requires_frozen
def get_code(cls, fullname):
"""Return the code object for the frozen module."""
return _imp.get_frozen_object(fullname)
@classmethod
@_requires_frozen
def get_source(cls, fullname):
"""Return None as frozen modules do not have source code."""
return None
@classmethod
@_requires_frozen
def is_package(cls, fullname):
"""Return True if the frozen module is a package."""
return _imp.is_frozen_package(fullname)
class WindowsRegistryFinder:
"""Meta path finder for modules declared in the Windows registry."""
REGISTRY_KEY = (
'Software\\Python\\PythonCore\\{sys_version}'
'\\Modules\\{fullname}')
REGISTRY_KEY_DEBUG = (
'Software\\Python\\PythonCore\\{sys_version}'
'\\Modules\\{fullname}\\Debug')
DEBUG_BUILD = False # Changed in _setup()
@classmethod
def _open_registry(cls, key):
try:
return _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, key)
except OSError:
return _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, key)
@classmethod
def _search_registry(cls, fullname):
if cls.DEBUG_BUILD:
registry_key = cls.REGISTRY_KEY_DEBUG
else:
registry_key = cls.REGISTRY_KEY
key = registry_key.format(fullname=fullname,
sys_version=sys.version[:3])
try:
with cls._open_registry(key) as hkey:
filepath = _winreg.QueryValue(hkey, '')
except OSError:
return None
return filepath
@classmethod
def find_spec(cls, fullname, path=None, target=None):
filepath = cls._search_registry(fullname)
if filepath is None:
return None
try:
_path_stat(filepath)
except OSError:
return None
for loader, suffixes in _get_supported_file_loaders():
if filepath.endswith(tuple(suffixes)):
spec = spec_from_loader(fullname, loader(fullname, filepath),
origin=filepath)
return spec
@classmethod
def find_module(cls, fullname, path=None):
"""Find module named in the registry.
        This method is deprecated. Use find_spec() instead.
"""
spec = cls.find_spec(fullname, path)
if spec is not None:
return spec.loader
else:
return None
class _LoaderBasics:
"""Base class of common code needed by both SourceLoader and
SourcelessFileLoader."""
def is_package(self, fullname):
"""Concrete implementation of InspectLoader.is_package by checking if
the path returned by get_filename has a filename of '__init__.py'."""
filename = _path_split(self.get_filename(fullname))[1]
filename_base = filename.rsplit('.', 1)[0]
tail_name = fullname.rpartition('.')[2]
return filename_base == '__init__' and tail_name != '__init__'
def exec_module(self, module):
"""Execute the module."""
code = self.get_code(module.__name__)
if code is None:
raise ImportError('cannot load module {!r} when get_code() '
'returns None'.format(module.__name__))
_call_with_frames_removed(exec, code, module.__dict__)
load_module = _load_module_shim
class SourceLoader(_LoaderBasics):
def path_mtime(self, path):
"""Optional method that returns the modification time (an int) for the
specified path, where path is a str.
Raises IOError when the path cannot be handled.
"""
raise IOError
def path_stats(self, path):
"""Optional method returning a metadata dict for the specified path
to by the path (str).
Possible keys:
- 'mtime' (mandatory) is the numeric timestamp of last source
code modification;
- 'size' (optional) is the size in bytes of the source code.
Implementing this method allows the loader to read bytecode files.
Raises IOError when the path cannot be handled.
"""
return {'mtime': self.path_mtime(path)}
def _cache_bytecode(self, source_path, cache_path, data):
"""Optional method which writes data (bytes) to a file path (a str).
Implementing this method allows for the writing of bytecode files.
        The source path is needed in order to correctly transfer permissions.
"""
# For backwards compatibility, we delegate to set_data()
return self.set_data(cache_path, data)
def set_data(self, path, data):
"""Optional method which writes data (bytes) to a file path (a str).
Implementing this method allows for the writing of bytecode files.
"""
def get_source(self, fullname):
"""Concrete implementation of InspectLoader.get_source."""
path = self.get_filename(fullname)
try:
source_bytes = self.get_data(path)
except OSError as exc:
raise ImportError('source not available through get_data()',
name=fullname) from exc
return decode_source(source_bytes)
def source_to_code(self, data, path, *, _optimize=-1):
"""Return the code object compiled from source.
The 'data' argument can be any object type that compile() supports.
"""
return _call_with_frames_removed(compile, data, path, 'exec',
dont_inherit=True, optimize=_optimize)
def get_code(self, fullname):
"""Concrete implementation of InspectLoader.get_code.
Reading of bytecode requires path_stats to be implemented. To write
bytecode, set_data must also be implemented.
"""
source_path = self.get_filename(fullname)
source_mtime = None
try:
bytecode_path = cache_from_source(source_path)
except NotImplementedError:
bytecode_path = None
else:
try:
st = self.path_stats(source_path)
except IOError:
pass
else:
source_mtime = int(st['mtime'])
try:
data = self.get_data(bytecode_path)
except OSError:
pass
else:
try:
bytes_data = _validate_bytecode_header(data,
source_stats=st, name=fullname,
path=bytecode_path)
except (ImportError, EOFError):
pass
else:
_verbose_message('{} matches {}', bytecode_path,
source_path)
return _compile_bytecode(bytes_data, name=fullname,
bytecode_path=bytecode_path,
source_path=source_path)
source_bytes = self.get_data(source_path)
code_object = self.source_to_code(source_bytes, source_path)
_verbose_message('code object from {}', source_path)
if (not sys.dont_write_bytecode and bytecode_path is not None and
source_mtime is not None):
data = _code_to_bytecode(code_object, source_mtime,
len(source_bytes))
try:
self._cache_bytecode(source_path, bytecode_path, data)
_verbose_message('wrote {!r}', bytecode_path)
except NotImplementedError:
pass
return code_object
class FileLoader:
"""Base file loader class which implements the loader protocol methods that
require file system usage."""
def __init__(self, fullname, path):
"""Cache the module name and the path to the file found by the
finder."""
self.name = fullname
self.path = path
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
def __hash__(self):
return hash(self.name) ^ hash(self.path)
@_check_name
def load_module(self, fullname):
"""Load a module from a file.
This method is deprecated. Use exec_module() instead.
"""
# The only reason for this method is for the name check.
# Issue #14857: Avoid the zero-argument form of super so the implementation
# of that form can be updated without breaking the frozen module
return super(FileLoader, self).load_module(fullname)
@_check_name
def get_filename(self, fullname):
"""Return the path to the source file as found by the finder."""
return self.path
def get_data(self, path):
"""Return the data from path as raw bytes."""
with _io.FileIO(path, 'r') as file:
return file.read()
class SourceFileLoader(FileLoader, SourceLoader):
"""Concrete implementation of SourceLoader using the file system."""
def path_stats(self, path):
"""Return the metadata for the path."""
st = _path_stat(path)
return {'mtime': st.st_mtime, 'size': st.st_size}
def _cache_bytecode(self, source_path, bytecode_path, data):
# Adapt between the two APIs
mode = _calc_mode(source_path)
return self.set_data(bytecode_path, data, _mode=mode)
def set_data(self, path, data, *, _mode=0o666):
"""Write bytes data to a file."""
parent, filename = _path_split(path)
path_parts = []
# Figure out what directories are missing.
while parent and not _path_isdir(parent):
parent, part = _path_split(parent)
path_parts.append(part)
# Create needed directories.
for part in reversed(path_parts):
parent = _path_join(parent, part)
try:
_os.mkdir(parent)
except FileExistsError:
# Probably another Python process already created the dir.
continue
except OSError as exc:
# Could be a permission error, read-only filesystem: just forget
# about writing the data.
_verbose_message('could not create {!r}: {!r}', parent, exc)
return
try:
_write_atomic(path, data, _mode)
_verbose_message('created {!r}', path)
except OSError as exc:
# Same as above: just don't write the bytecode.
_verbose_message('could not create {!r}: {!r}', path, exc)
class SourcelessFileLoader(FileLoader, _LoaderBasics):
"""Loader which handles sourceless file imports."""
def get_code(self, fullname):
path = self.get_filename(fullname)
data = self.get_data(path)
bytes_data = _validate_bytecode_header(data, name=fullname, path=path)
return _compile_bytecode(bytes_data, name=fullname, bytecode_path=path)
def get_source(self, fullname):
"""Return None as there is no source code."""
return None
# Filled in by _setup().
EXTENSION_SUFFIXES = []
class ExtensionFileLoader:
"""Loader for extension modules.
The constructor is designed to work with FileFinder.
"""
def __init__(self, name, path):
self.name = name
self.path = path
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
def __hash__(self):
return hash(self.name) ^ hash(self.path)
@_check_name
def load_module(self, fullname):
"""Load an extension module."""
# Once an exec_module() implementation is added we can also
# add a deprecation warning here.
with _ManageReload(fullname):
module = _call_with_frames_removed(_imp.load_dynamic,
fullname, self.path)
_verbose_message('extension module loaded from {!r}', self.path)
is_package = self.is_package(fullname)
if is_package and not hasattr(module, '__path__'):
module.__path__ = [_path_split(self.path)[0]]
module.__loader__ = self
module.__package__ = module.__name__
if not is_package:
module.__package__ = module.__package__.rpartition('.')[0]
return module
def is_package(self, fullname):
"""Return True if the extension module is a package."""
file_name = _path_split(self.path)[1]
return any(file_name == '__init__' + suffix
for suffix in EXTENSION_SUFFIXES)
def get_code(self, fullname):
"""Return None as an extension module cannot create a code object."""
return None
def get_source(self, fullname):
"""Return None as extension modules have no source code."""
return None
@_check_name
def get_filename(self, fullname):
"""Return the path to the source file as found by the finder."""
return self.path
class _NamespacePath:
"""Represents a namespace package's path. It uses the module name
to find its parent module, and from there it looks up the parent's
__path__. When this changes, the module's own path is recomputed,
using path_finder. For top-level modules, the parent module's path
is sys.path."""
def __init__(self, name, path, path_finder):
self._name = name
self._path = path
self._last_parent_path = tuple(self._get_parent_path())
self._path_finder = path_finder
def _find_parent_path_names(self):
"""Returns a tuple of (parent-module-name, parent-path-attr-name)"""
parent, dot, me = self._name.rpartition('.')
if dot == '':
# This is a top-level module. sys.path contains the parent path.
return 'sys', 'path'
# Not a top-level module. parent-module.__path__ contains the
# parent path.
return parent, '__path__'
def _get_parent_path(self):
parent_module_name, path_attr_name = self._find_parent_path_names()
return getattr(sys.modules[parent_module_name], path_attr_name)
def _recalculate(self):
# If the parent's path has changed, recalculate _path
parent_path = tuple(self._get_parent_path()) # Make a copy
if parent_path != self._last_parent_path:
spec = self._path_finder(self._name, parent_path)
# Note that no changes are made if a loader is returned, but we
# do remember the new parent path
if spec is not None and spec.loader is None:
if spec.submodule_search_locations:
self._path = spec.submodule_search_locations
self._last_parent_path = parent_path # Save the copy
return self._path
def __iter__(self):
return iter(self._recalculate())
def __len__(self):
return len(self._recalculate())
def __repr__(self):
return '_NamespacePath({!r})'.format(self._path)
def __contains__(self, item):
return item in self._recalculate()
def append(self, item):
self._path.append(item)
# We use this exclusively in init_module_attrs() for backward-compatibility.
class _NamespaceLoader:
def __init__(self, name, path, path_finder):
self._path = _NamespacePath(name, path, path_finder)
@classmethod
def module_repr(cls, module):
"""Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
return '<module {!r} (namespace)>'.format(module.__name__)
def is_package(self, fullname):
return True
def get_source(self, fullname):
return ''
def get_code(self, fullname):
return compile('', '<string>', 'exec', dont_inherit=True)
def exec_module(self, module):
pass
def load_module(self, fullname):
"""Load a namespace module.
This method is deprecated. Use exec_module() instead.
"""
# The import system never calls this method.
_verbose_message('namespace module loaded with path {!r}', self._path)
return _load_module_shim(self, fullname)
# Finders #####################################################################
class PathFinder:
"""Meta path finder for sys.path and package __path__ attributes."""
@classmethod
def invalidate_caches(cls):
"""Call the invalidate_caches() method on all path entry finders
        stored in sys.path_importer_cache (where implemented)."""
for finder in sys.path_importer_cache.values():
if hasattr(finder, 'invalidate_caches'):
finder.invalidate_caches()
@classmethod
def _path_hooks(cls, path):
"""Search sequence of hooks for a finder for 'path'.
If 'hooks' is false then use sys.path_hooks.
"""
if not sys.path_hooks:
_warnings.warn('sys.path_hooks is empty', ImportWarning)
for hook in sys.path_hooks:
try:
return hook(path)
except ImportError:
continue
else:
return None
@classmethod
def _path_importer_cache(cls, path):
"""Get the finder for the path entry from sys.path_importer_cache.
If the path entry is not in the cache, find the appropriate finder
and cache it. If no finder is available, store None.
"""
if path == '':
path = _os.getcwd()
try:
finder = sys.path_importer_cache[path]
except KeyError:
finder = cls._path_hooks(path)
sys.path_importer_cache[path] = finder
return finder
@classmethod
def _legacy_get_spec(cls, fullname, finder):
# This would be a good place for a DeprecationWarning if
# we ended up going that route.
if hasattr(finder, 'find_loader'):
loader, portions = finder.find_loader(fullname)
else:
loader = finder.find_module(fullname)
portions = []
if loader is not None:
return spec_from_loader(fullname, loader)
spec = ModuleSpec(fullname, None)
spec.submodule_search_locations = portions
return spec
@classmethod
def _get_spec(cls, fullname, path, target=None):
"""Find the loader or namespace_path for this module/package name."""
# If this ends up being a namespace package, namespace_path is
# the list of paths that will become its __path__
namespace_path = []
for entry in path:
if not isinstance(entry, (str, bytes)):
continue
finder = cls._path_importer_cache(entry)
if finder is not None:
if hasattr(finder, 'find_spec'):
spec = finder.find_spec(fullname, target)
else:
spec = cls._legacy_get_spec(fullname, finder)
if spec is None:
continue
if spec.loader is not None:
return spec
portions = spec.submodule_search_locations
if portions is None:
raise ImportError('spec missing loader')
# This is possibly part of a namespace package.
# Remember these path entries (if any) for when we
# create a namespace package, and continue iterating
# on path.
namespace_path.extend(portions)
else:
spec = ModuleSpec(fullname, None)
spec.submodule_search_locations = namespace_path
return spec
@classmethod
def find_spec(cls, fullname, path=None, target=None):
"""find the module on sys.path or 'path' based on sys.path_hooks and
sys.path_importer_cache."""
if path is None:
path = sys.path
spec = cls._get_spec(fullname, path, target)
if spec is None:
return None
elif spec.loader is None:
namespace_path = spec.submodule_search_locations
if namespace_path:
# We found at least one namespace path. Return a
# spec which can create the namespace package.
spec.origin = 'namespace'
spec.submodule_search_locations = _NamespacePath(fullname, namespace_path, cls._get_spec)
return spec
else:
return None
else:
return spec
@classmethod
def find_module(cls, fullname, path=None):
"""find the module on sys.path or 'path' based on sys.path_hooks and
sys.path_importer_cache.
This method is deprecated. Use find_spec() instead.
"""
spec = cls.find_spec(fullname, path)
if spec is None:
return None
return spec.loader
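# Illustrative behaviour (results depend on the running interpreter's
# sys.path): for an ordinary stdlib package PathFinder.find_spec() typically
# returns a file-backed spec, while a directory lacking __init__.py yields a
# namespace-package spec with loader None and origin 'namespace'.
#
#   spec = PathFinder.find_spec('email')
#   spec.loader    # -> usually a SourceFileLoader instance
#   spec.origin    # -> path of email/__init__.py on this installation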
class FileFinder:
"""File-based finder.
Interactions with the file system are cached for performance, being
refreshed when the directory the finder is handling has been modified.
"""
def __init__(self, path, *loader_details):
"""Initialize with the path to search on and a variable number of
2-tuples containing the loader and the file suffixes the loader
recognizes."""
loaders = []
for loader, suffixes in loader_details:
loaders.extend((suffix, loader) for suffix in suffixes)
self._loaders = loaders
# Base (directory) path
self.path = path or '.'
self._path_mtime = -1
self._path_cache = set()
self._relaxed_path_cache = set()
def invalidate_caches(self):
"""Invalidate the directory mtime."""
self._path_mtime = -1
find_module = _find_module_shim
def find_loader(self, fullname):
"""Try to find a loader for the specified module, or the namespace
package portions. Returns (loader, list-of-portions).
This method is deprecated. Use find_spec() instead.
"""
spec = self.find_spec(fullname)
if spec is None:
return None, []
return spec.loader, spec.submodule_search_locations or []
def _get_spec(self, loader_class, fullname, path, smsl, target):
loader = loader_class(fullname, path)
return spec_from_file_location(fullname, path, loader=loader,
submodule_search_locations=smsl)
def find_spec(self, fullname, target=None):
"""Try to find a loader for the specified module, or the namespace
package portions. Returns (loader, list-of-portions)."""
is_namespace = False
tail_module = fullname.rpartition('.')[2]
try:
mtime = _path_stat(self.path or _os.getcwd()).st_mtime
except OSError:
mtime = -1
if mtime != self._path_mtime:
self._fill_cache()
self._path_mtime = mtime
# tail_module keeps the original casing, for __file__ and friends
if _relax_case():
cache = self._relaxed_path_cache
cache_module = tail_module.lower()
else:
cache = self._path_cache
cache_module = tail_module
# Check if the module is the name of a directory (and thus a package).
if cache_module in cache:
base_path = _path_join(self.path, tail_module)
for suffix, loader_class in self._loaders:
init_filename = '__init__' + suffix
full_path = _path_join(base_path, init_filename)
if _path_isfile(full_path):
return self._get_spec(loader_class, fullname, full_path, [base_path], target)
else:
# If a namespace package, return the path if we don't
# find a module in the next section.
is_namespace = _path_isdir(base_path)
        # Check whether a file with a proper suffix exists.
for suffix, loader_class in self._loaders:
full_path = _path_join(self.path, tail_module + suffix)
_verbose_message('trying {}'.format(full_path), verbosity=2)
if cache_module + suffix in cache:
if _path_isfile(full_path):
return self._get_spec(loader_class, fullname, full_path, None, target)
if is_namespace:
_verbose_message('possible namespace for {}'.format(base_path))
spec = ModuleSpec(fullname, None)
spec.submodule_search_locations = [base_path]
return spec
return None
def _fill_cache(self):
"""Fill the cache of potential modules and packages for this directory."""
path = self.path
try:
contents = _os.listdir(path or _os.getcwd())
except (FileNotFoundError, PermissionError, NotADirectoryError):
# Directory has either been removed, turned into a file, or made
# unreadable.
contents = []
# We store two cached versions, to handle runtime changes of the
# PYTHONCASEOK environment variable.
if not sys.platform.startswith('win'):
self._path_cache = set(contents)
else:
# Windows users can import modules with case-insensitive file
# suffixes (for legacy reasons). Make the suffix lowercase here
# so it's done once instead of for every import. This is safe as
# the specified suffixes to check against are always specified in a
# case-sensitive manner.
lower_suffix_contents = set()
for item in contents:
name, dot, suffix = item.partition('.')
if dot:
new_name = '{}.{}'.format(name, suffix.lower())
else:
new_name = name
lower_suffix_contents.add(new_name)
self._path_cache = lower_suffix_contents
if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS):
self._relaxed_path_cache = {fn.lower() for fn in contents}
@classmethod
def path_hook(cls, *loader_details):
"""A class method which returns a closure to use on sys.path_hook
which will return an instance using the specified loaders and the path
called on the closure.
If the path called on the closure is not a directory, ImportError is
raised.
"""
def path_hook_for_FileFinder(path):
"""Path hook for importlib.machinery.FileFinder."""
if not _path_isdir(path):
raise ImportError('only directories are supported', path=path)
return cls(path, *loader_details)
return path_hook_for_FileFinder
def __repr__(self):
return 'FileFinder({!r})'.format(self.path)
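# Illustrative registration, close to what _setup()/_install() end up doing
# (BYTECODE_SUFFIXES and the extension-module loaders are filled in during
# _setup(), so the exact loader_details vary by platform):
#
#   loader_details = [(SourceFileLoader, SOURCE_SUFFIXES),
#                     (SourcelessFileLoader, BYTECODE_SUFFIXES)]
#   sys.path_hooks.append(FileFinder.path_hook(*loader_details))
#   # Every sys.path entry that is a directory then gets its own FileFinder,
#   # cached in sys.path_importer_cache.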
# Import itself ###############################################################
class _ImportLockContext:
"""Context manager for the import lock."""
def __enter__(self):
"""Acquire the import lock."""
_imp.acquire_lock()
def __exit__(self, exc_type, exc_value, exc_traceback):
"""Release the import lock regardless of any raised exceptions."""
_imp.release_lock()
def _resolve_name(name, package, level):
"""Resolve a relative module name to an absolute one."""
bits = package.rsplit('.', level - 1)
if len(bits) < level:
raise ValueError('attempted relative import beyond top-level package')
base = bits[0]
return '{}.{}'.format(base, name) if name else base
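# Worked example: a relative import 'from ..helper import x' executed in
# module 'pkg.sub.mod' (so package == 'pkg.sub', level == 2) resolves as
#
#   _resolve_name('helper', 'pkg.sub', 2)   # -> 'pkg.helper'
#
# because 'pkg.sub'.rsplit('.', 1) == ['pkg', 'sub'] and the base is 'pkg'.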
def _find_spec_legacy(finder, name, path):
# This would be a good place for a DeprecationWarning if
# we ended up going that route.
loader = finder.find_module(name, path)
if loader is None:
return None
return spec_from_loader(name, loader)
def _find_spec(name, path, target=None):
"""Find a module's loader."""
if not sys.meta_path:
_warnings.warn('sys.meta_path is empty', ImportWarning)
# We check sys.modules here for the reload case. While a passed-in
# target will usually indicate a reload there is no guarantee, whereas
# sys.modules provides one.
is_reload = name in sys.modules
for finder in sys.meta_path:
with _ImportLockContext():
try:
find_spec = finder.find_spec
except AttributeError:
spec = _find_spec_legacy(finder, name, path)
if spec is None:
continue
else:
spec = find_spec(name, path, target)
if spec is not None:
# The parent import may have already imported this module.
if not is_reload and name in sys.modules:
module = sys.modules[name]
try:
__spec__ = module.__spec__
except AttributeError:
# We use the found spec since that is the one that
# we would have used if the parent module hadn't
# beaten us to the punch.
return spec
else:
if __spec__ is None:
return spec
else:
return __spec__
else:
return spec
else:
return None
def _sanity_check(name, package, level):
"""Verify arguments are "sane"."""
if not isinstance(name, str):
raise TypeError('module name must be str, not {}'.format(type(name)))
if level < 0:
raise ValueError('level must be >= 0')
if package:
if not isinstance(package, str):
raise TypeError('__package__ not set to a string')
elif package not in sys.modules:
msg = ('Parent module {!r} not loaded, cannot perform relative '
'import')
raise SystemError(msg.format(package))
if not name and level == 0:
raise ValueError('Empty module name')
_ERR_MSG_PREFIX = 'No module named '
_ERR_MSG = _ERR_MSG_PREFIX + '{!r}'
def _find_and_load_unlocked(name, import_):
path = None
parent = name.rpartition('.')[0]
if parent:
if parent not in sys.modules:
_call_with_frames_removed(import_, parent)
# Crazy side-effects!
if name in sys.modules:
return sys.modules[name]
parent_module = sys.modules[parent]
try:
path = parent_module.__path__
except AttributeError:
msg = (_ERR_MSG + '; {!r} is not a package').format(name, parent)
raise ImportError(msg, name=name)
spec = _find_spec(name, path)
if spec is None:
raise ImportError(_ERR_MSG.format(name), name=name)
else:
module = _SpecMethods(spec)._load_unlocked()
if parent:
# Set the module as an attribute on its parent.
parent_module = sys.modules[parent]
setattr(parent_module, name.rpartition('.')[2], module)
return module
def _find_and_load(name, import_):
"""Find and load the module, and release the import lock."""
with _ModuleLockManager(name):
return _find_and_load_unlocked(name, import_)
def _gcd_import(name, package=None, level=0):
"""Import and return the module based on its name, the package the call is
being made from, and the level adjustment.
This function represents the greatest common denominator of functionality
between import_module and __import__. This includes setting __package__ if
the loader did not.
"""
_sanity_check(name, package, level)
if level > 0:
name = _resolve_name(name, package, level)
_imp.acquire_lock()
if name not in sys.modules:
return _find_and_load(name, _gcd_import)
module = sys.modules[name]
if module is None:
_imp.release_lock()
message = ('import of {} halted; '
'None in sys.modules'.format(name))
raise ImportError(message, name=name)
_lock_unlock_module(name)
return module
def _handle_fromlist(module, fromlist, import_):
"""Figure out what __import__ should return.
The import_ parameter is a callable which takes the name of module to
import. It is required to decouple the function from assuming importlib's
import implementation is desired.
"""
# The hell that is fromlist ...
# If a package was imported, try to import stuff from fromlist.
if hasattr(module, '__path__'):
if '*' in fromlist:
fromlist = list(fromlist)
fromlist.remove('*')
if hasattr(module, '__all__'):
fromlist.extend(module.__all__)
for x in fromlist:
if not hasattr(module, x):
from_name = '{}.{}'.format(module.__name__, x)
try:
_call_with_frames_removed(import_, from_name)
except ImportError as exc:
# Backwards-compatibility dictates we ignore failed
# imports triggered by fromlist for modules that don't
# exist.
if str(exc).startswith(_ERR_MSG_PREFIX):
if exc.name == from_name:
continue
raise
return module
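# Illustrative trace: 'from pkg import mod' roughly compiles to
# __import__('pkg', globals(), locals(), ('mod',), 0). Once 'pkg' itself is
# imported, _handle_fromlist() sees that 'pkg' has a __path__ and that 'mod'
# is not yet an attribute, imports 'pkg.mod' on demand, and returns the 'pkg'
# module so the caller can pull the requested attributes from it.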
def _calc___package__(globals):
"""Calculate what __package__ should be.
__package__ is not guaranteed to be defined or could be set to None
to represent that its proper value is unknown.
"""
package = globals.get('__package__')
if package is None:
package = globals['__name__']
if '__path__' not in globals:
            package = package.rpartition('.')[0]
    return package
|
#!/usr/bin/env python
# vim: sw=4:ts=4:sts=4:fdm=indent:fdl=0:
# -*- coding: UTF8 -*-
#
# A sword KJV indexed search module.
# Copyright (C) 2012 Josiah Gordon <josiahg@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
copying_str = \
'''
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
'''
warranty_str = \
'''
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
'''
""" KJV indexer and search modules.
BibleSearch: Can index and search the 'KJV' sword module using different types
of searches, including the following:
    Strongs number search - Searches for all verses containing either
                            the strongs phrase, any strongs number, or
                            a superset of the strongs numbers.
Morphological tags search - Same as the strongs...
Word or phrase search - Same as the strongs...
Regular expression search - Searches the whole Bible using the provided
regular expression.
"""
from sys import argv, exit
from cmd import Cmd
from difflib import get_close_matches
from os import getcwd, getenv
from functools import wraps, partial
from argparse import ArgumentParser
from time import strftime, mktime, localtime
from textwrap import wrap, fill, TextWrapper, dedent
from struct import unpack
from termios import TIOCGWINSZ
from fcntl import ioctl
from collections import defaultdict
from tarfile import TarFile, TarInfo
from io import BytesIO
from contextlib import closing
from itertools import product
from xml.dom.minidom import parseString
import dbm
import os
import sys
import json
import re
import locale
VERBOSE_LEVEL = 1
COLOR_LEVEL = 3
# Set up the index path to '$HOME/.biblesearch', and if that doesn't exist,
# use the current working directory.
INDEX_PATH = os.path.join(os.getenv('HOME'), '.biblesearch')
if not os.path.isdir(INDEX_PATH):
INDEX_PATH = getcwd()
# Highlight colors.
highlight_color = '\033[7m'
highlight_text = '%s\\1\033[m' % highlight_color
word_regx = re.compile(r'\b([\w-]+)\b')
# Strip previous color.
strip_color_regx = re.compile('\033\[[\d;]*m')
def info_print(data, end='\n', tag=0):
""" Print the data to stderr as info.
"""
if tag <= VERBOSE_LEVEL:
print(data, end=end, file=sys.stderr)
sys.stderr.flush()
book_list = ['Genesis', 'Exodus', 'Leviticus', 'Numbers', 'Deuteronomy',
'Joshua', 'Judges', 'Ruth', 'I Samuel', 'II Samuel', 'I Kings',
'II Kings', 'I Chronicles', 'II Chronicles', 'Ezra', 'Nehemiah',
'Esther', 'Job', 'Psalms', 'Proverbs', 'Ecclesiastes',
'Song of Solomon', 'Isaiah', 'Jeremiah', 'Lamentations',
'Ezekiel', 'Daniel', 'Hosea', 'Joel', 'Amos', 'Obadiah', 'Jonah',
'Micah', 'Nahum', 'Habakkuk', 'Zephaniah', 'Haggai', 'Zechariah',
'Malachi', 'Matthew', 'Mark', 'Luke', 'John', 'Acts', 'Romans',
'I Corinthians', 'II Corinthians', 'Galatians', 'Ephesians',
'Philippians', 'Colossians', 'I Thessalonians',
'II Thessalonians', 'I Timothy', 'II Timothy', 'Titus',
'Philemon', 'Hebrews', 'James', 'I Peter', 'II Peter', 'I John',
'II John', 'III John', 'Jude', 'Revelation of John']
# Key function used to sort a list of verse references.
def sort_key(ref):
""" Sort verses by book.
"""
try:
book, chap_verse = ref.rsplit(' ', 1)
chap, verse = chap_verse.split(':')
val = '%02d%03d%03d' % (int(book_list.index(book)), int(chap),
int(verse))
return val
except Exception as err:
print('Error sorting "%s": %s' % (ref, err), file=sys.stderr)
exit()
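# Illustrative example (not part of the original module): sort_key can be
# passed to sorted() to order verse references canonically.  The references
# below are hypothetical input.
#     refs = ['John 3:16', 'Genesis 1:1', 'Psalms 23:1']
#     sorted(refs, key=sort_key)
#     # -> ['Genesis 1:1', 'Psalms 23:1', 'John 3:16']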
# def add_context(ref_set, count=0):
# """ Add count number of verses before and after each reference.
#
# """
#
# if count == 0:
# return ref_set
#
# # Make a copy to work on.
# clone_set = set(ref_set)
# for ref in ref_set:
# start = Sword.VerseKey(ref)
# end = Sword.VerseKey(ref)
# # Pass the beginning of the book.
# start.decrement()
# start.decrement(count - 1)
# # Pass the end of the book.
# end.increment()
# end.increment(count - 1)
# clone_set.update(VerseIter(start.getText(), end.getText()))
#
# return clone_set
def get_encoding():
""" Figure out the encoding to use for strings.
"""
# Hopefully get the correct encoding to use.
lang, enc = locale.getlocale()
if not lang or lang == 'C':
encoding = 'ascii'
else:
encoding = enc
return encoding
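# Illustrative example: get_encoding() is used below to round-trip text
# through the locale's encoding (typically 'UTF-8', or 'ascii' as a fallback).
#     enc = get_encoding()
#     text = 'Psalms 23:1'.encode(enc, 'replace').decode(enc, 'replace')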
def screen_size():
""" Returns a tuple containing the hight and width of the screen in
characters (i.e. (25, 80)).
"""
get_size = lambda fd: unpack("hh", ioctl(fd, TIOCGWINSZ, '0000'))
try:
for i in [0, 1, 2]:
return get_size(i)
except Exception as err:
        try:
            # os.open() returns a raw file descriptor, so close it manually.
            term_fd = os.open(os.ctermid(), os.O_RDONLY)
            try:
                return get_size(term_fd)
            finally:
                os.close(term_fd)
        except Exception:
            return (25, 80)
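# Illustrative example: screen_size() returns (height, width), so the width
# used for word wrapping is element [1].
#     height, width = screen_size()
#     print(fill('In the beginning God created the heaven and the earth.',
#                width))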
def render_raw2(verse_text, strongs=False, morph=False):
""" Render raw verse text.
"""
strong_regx = re.compile(r'strong:([GH]\d+)', re.I)
morph_regx = re.compile(r'(?:Morph|robinson):([\w-]*)', re.I)
test_regx = re.compile(r'''
([^<]*)
<(?P<tag>seg|q|w|transChange|note)([^>]*)>
([\w\W]*?)
</(?P=tag)>
([^<]*)
''', re.I | re.X)
divname_regx = re.compile(r'''
<(?:divineName)>
([^<]*?)
([\'s]*)
</(?:divineName)>
''', re.I | re.X)
div_upper = lambda m: m.group(1).upper() + m.group(2)
marker_regx = re.compile(r'.*marker="(.)".*', re.I)
info_print(verse_text, tag=4)
def recurse_tag(text):
""" Recursively parse raw verse text using regular expressions, and
returns the correctly formatted text.
"""
v_text = ''
for match in test_regx.finditer(text):
opt, tag_name, tag_attr, tag_text, punct = match.groups()
strongs_str = ''
morph_str = ''
italic_str = '<i>%s</i>' if 'added' in tag_attr.lower() else '%s'
if 'note' in tag_name.lower() or 'study' in tag_attr.lower():
note_str = ' <n>%s</n>'
else:
note_str = '%s'
if strongs and strong_regx.search(tag_attr):
strongs_list = strong_regx.findall(tag_attr)
strongs_str = ' <%s>' % '> <'.join(strongs_list)
if morph and morph_regx.search(tag_attr):
morph_list = morph_regx.findall(tag_attr)
morph_str = ' {%s}' % '} {'.join(morph_list)
if match.re.search(tag_text):
temp_text = recurse_tag(tag_text) + strongs_str + morph_str
v_text += note_str % italic_str % (temp_text)
else:
info_print((opt, tag_name, tag_attr, tag_text, punct), tag=4)
opt = marker_regx.sub('<p>\\1</p> ', opt)
tag_text = divname_regx.sub(div_upper, tag_text)
tag_text = note_str % italic_str % tag_text
v_text += opt + tag_text + strongs_str + morph_str
v_text += punct
return v_text
return recurse_tag(verse_text)
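# Illustrative example: the OSIS-style markup below is hypothetical, but uses
# the tags render_raw2() looks for (w/strong and transChange/added).
#     render_raw2('<w lemma="strong:G2316">God</w>', strongs=True)
#     # -> 'God <G2316>'
#     render_raw2('<transChange type="added">is</transChange>')
#     # -> '<i>is</i>'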
def render_raw(verse_text, strongs=False, morph=False):
""" Render raw verse text.
"""
strong_regx = re.compile(r'strong:([GH]\d+)', re.I)
morph_regx = re.compile(r'(?:Morph|robinson):([\w-]*)', re.I)
test_regx = re.compile(r'''
([^<]*)
<(?P<tag>q|w|transChange|note)([^>]*)>
([\w\W]*?)
</(?P=tag)>
([^<]*)
''', re.I | re.X)
divname_regx = re.compile(r'''
(?:<seg>)?
<(?:divineName)>+
([^<]*?)
([\'s]*)
</(?:divineName)>
(?:</seg>)?
''', re.I | re.X)
xadded_regx = re.compile(r'<seg subType="x-added"[^>]*>([^<]*)</seg>',
re.I)
div_upper = lambda m: m.group(1).upper() + m.group(2)
marker_regx = re.compile(r'.*marker="(.)".*', re.I)
v_text = ''
info_print(verse_text, tag=4)
for match in test_regx.finditer(verse_text):
opt, tag_name, tag_attr, tag_text, punct = match.groups()
italic_str = '%s'
if match.re.search(tag_text):
if 'added' in tag_attr.lower():
italic_str = '<i>%s</i>' + punct
punct = ''
match_list = match.re.findall(tag_text + punct)
else:
match_list = [match.groups()]
temp_text = ''
for opt, tag_name, tag_attr, tag_text, punct in match_list:
info_print((opt, tag_name, tag_attr, tag_text, punct), tag=4)
tag_text = divname_regx.sub(div_upper, tag_text)
tag_text = xadded_regx.sub('<i>\\1</i>', tag_text)
if 'marker' in opt.lower():
temp_text += '<p>%s</p> ' % marker_regx.sub('\\1', opt)
opt = ''
if 'note' in tag_name.lower() or 'study' in tag_attr.lower():
temp_text += ' <n>%s</n>' % tag_text
tag_text = ''
temp_italic = '<i>%s</i>' if 'added' in tag_attr.lower() else '%s'
temp_text += temp_italic % (opt + tag_text)
if tag_name.strip().lower() in ['transchange', 'w', 'seg']:
if strong_regx.search(tag_attr) and strongs:
temp_text += \
' <%s>' % '> <'.join(strong_regx.findall(tag_attr))
if morph_regx.search(tag_attr) and morph:
temp_text += \
' {%s}' % '} {'.join(morph_regx.findall(tag_attr))
temp_text += punct
v_text += italic_str % temp_text
continue
opt, tag_name, tag_attr, tag_text, punct = match.groups()
tag_text = divname_regx.sub(
lambda m: m.group(1).upper() + m.group(2), tag_text)
if 'marker' in opt.lower():
v_text += '<p>%s</p> ' % marker_regx.sub('\\1', opt)
if 'added' in tag_attr.lower():
v_text += '<i>'
elif 'note' in tag_name.lower() or 'study' in tag_attr.lower():
v_text += ' <n>%s</n>' % tag_text
if match.re.search(tag_text):
for i in match.re.finditer(tag_text):
info_print(i.groups(), tag=4)
o, t_n, t_a, t_t, p = i.groups()
if t_n.strip().lower() in ['transchange', 'w']:
v_text += o + t_t
if strong_regx.search(t_a) and strongs:
v_text += \
' <%s>' % '> <'.join(strong_regx.findall(t_a))
if morph_regx.search(t_a) and morph:
v_text += \
' {%s}' % '} {'.join(morph_regx.findall(t_a))
v_text += p
else:
if tag_name.strip().lower() in ['transchange', 'w']:
v_text += tag_text
if strong_regx.search(tag_attr) and strongs:
v_text += \
' <%s>' % '> <'.join(strong_regx.findall(tag_attr))
if morph_regx.search(tag_attr) and morph:
v_text += \
' {%s}' % '} {'.join(morph_regx.findall(tag_attr))
if 'added' in tag_attr.lower():
v_text += '</i>'
v_text += punct
info_print('%s: %s: %s: %s: %s' % (opt, tag_name, tag_attr,
tag_text, punct), tag=4)
return v_text
def render_verses_with_italics(ref_list, wrap=True, strongs=False,
morph=False, added=True, notes=False,
highlight_func=None, module='KJV', *args):
""" Renders a the verse text at verse_ref with italics highlighted.
Returns a strong "verse_ref: verse_text"
ref_list - List of references to render
wrap - Whether to wrap the text.
strongs - Include Strong's Numbers in the output.
morph - Include Morphological Tags in the output.
added - Include added text (i.e. italics) in the output.
notes - Include study notes at the end of the text.
highlight_func - A function to highlight anything else
(i.e. search terms.)
module - Sword module to render from.
*args - Any additional arguments to pass to
                             highlight_func.
highlight_func should take at least three arguments, verse_text,
strongs, and morph.
"""
# Set the colors of different items.
end_color = '\033[m'
# Build replacement strings that highlight Strong's Numbers and
# Morphological Tags.
if COLOR_LEVEL >= 2:
# The Strong's and Morphology matching regular expressions.
# Match strongs numbers.
strongs_regx = re.compile(r'''
<((?:\033\[[\d;]*m)*?[GH]?\d+?(?:\033\[[\d;]*m)*?)>
''', re.I | re.X)
# It needs to match with braces or it will catch all capitalized
        # words and words with '-'s in them.
info_print("Rendering results, please wait...\n", tag=0)
morph_regx = re.compile(r'''
\{((?:\033\[[\d+;]*m)*?[\w-]*?(?:\033\[[\d+;]*m)*?)\}
''', re.X)
strongs_color = '\033[36m'
morph_color = '\033[35m'
strongs_highlight = '<%s\\1%s>' % (strongs_color, end_color)
morph_highlight = '{%s\\1%s}' % (morph_color, end_color)
if COLOR_LEVEL >= 0:
ref_color = '\033[32m'
ref_highlight = '%s\\1%s' % (ref_color, end_color)
if COLOR_LEVEL >= 1 and added:
italic_color = '\033[4m'
italic_regx = re.compile(r'<i>\s?(.*?)\s?</i>', re.S)
italic_highlight = '%s\\1%s' % (italic_color, end_color)
# Get the local text encoding.
encoding = get_encoding()
# A substitution replacement function for highlighting italics.
def italic_color(match):
""" Color italic text, but first remove any previous color.
"""
# Strip any previous colors.
match_text = strip_color_regx.sub('', match.groups()[0])
# Color the italics.
return word_regx.sub(italic_highlight, match_text)
# Get an iterator over all the requested verses.
verse_iter = IndexedVerseTextIter(iter(ref_list), strongs, morph,
italic_markers=(COLOR_LEVEL >= 1),
added=added, paragraph=added,
notes=notes, module=module)
for verse_ref, verse_text in verse_iter:
if VERBOSE_LEVEL >= 30:
len_longest_key = len(max(verse_text[1].keys(), key=len))
for key, value in verse_text[1].items():
print('\033[33m{0:{1}}\033[m: {2}'.format(key,
len_longest_key,
value))
verse_text = verse_text[1]['_verse_text'][0]
        # Encode then decode the verse text to make it compatible with
        # the locale.
verse_text = verse_text.strip().encode(encoding, 'replace')
verse_text = verse_text.decode(encoding, 'replace')
verse_text = '%s: %s' % (verse_ref, verse_text)
# The text has to be word wrapped before adding any color, or else the
# color will add to the line length and the line will wrap too soon.
if wrap:
verse_text = fill(verse_text, screen_size()[1],
break_on_hyphens=False)
if COLOR_LEVEL >= 0:
# Color the verse reference.
colored_ref = word_regx.sub(ref_highlight, verse_ref)
verse_text = re.sub(verse_ref, colored_ref, verse_text)
if COLOR_LEVEL >= 1 and added:
# Highlight the italic text we previously pulled out.
verse_text = italic_regx.sub(italic_color, verse_text)
if COLOR_LEVEL >= 2:
# Highlight Strong's and Morphology if they are visible.
if strongs:
verse_text = strongs_regx.sub(strongs_highlight, verse_text)
if morph:
verse_text = morph_regx.sub(morph_highlight, verse_text)
if COLOR_LEVEL >= 3:
# Highlight the different elements.
if highlight_func:
verse_text = highlight_func(verse_text, strongs, morph, *args)
        # Finally produce the formatted text.
yield verse_text
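# Illustrative sketch (assumes a KJV index has already been built in
# INDEX_PATH): render each verse in a reference list to the terminal.
#     for line in render_verses_with_italics(['John 3:16'], strongs=True):
#         print(line)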
def highlight_search_terms(verse_text, strongs, morph, regx_list, flags):
""" Highlight search terms in the verse text.
"""
def highlight_group(match):
""" Highlight each word/Strong's Number/Morphological Tag in the
match.
"""
match_text = match.group()
for word in match.groups():
if word:
info_print(word, tag=20)
try:
match_text = re.sub('''
(
(?:\033\[[\d+;]*m|\\b)+
%s
(?:\033\[[\d+;]*m|\\b)+
)
''' % re.escape(word),
highlight_text, match_text, flags=re.X)
except Exception as err:
info_print("Error with highlighting word %s: %s" % \
(word, err), tag=4)
#match_text = match_text.replace(word, '\033[7m%s\033[m' % word)
#print(match_text)
return match_text
# Strip any previous colors.
match_text = strip_color_regx.sub('', match.group())
return word_regx.sub(highlight_text, match_text)
verse_text = verse_text.strip()
# Apply each highlighting regular expression to the text.
for regx in regx_list:
verse_text = regx.sub(highlight_group, verse_text)
return verse_text
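# Illustrative example: highlight the word 'beginning' in a rendered verse.
# The regular expression list would normally come from build_highlight_regx()
# below.
#     regx_list = build_highlight_regx(['beginning'], case_sensitive=False)
#     text = 'Genesis 1:1: In the beginning God created the heaven...'
#     print(highlight_search_terms(text, False, False, regx_list, re.I))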
def build_highlight_regx(search_list, case_sensitive, sloppy=False):
""" Build a regular expression and highlight string to colorize the
items in search_list as they appear in a verse.
"""
if not search_list:
return []
regx_list = []
    # Extra word boundary to catch ANSI color escape sequences.
word_bound = '(?:\033\[[\d;]*m|\\\\b)+'
# Extra space filler to pass over ansi color escape sequences.
extra_space = '|\033\[[\d;]*m|\033'
for item in search_list:
item = item.strip()
is_regex = (('*' in item and ' ' not in item) or item.startswith('&'))
if ('*' in item and ' ' not in item) and not item.startswith('&'):
# Build a little regular expression to highlight partial words.
item = item[1:] if item[0] in '!^+|' else item
item = item.replace('*', '\w*')
item = r'{0}({1}){0}'.format('(?:\033\[[\d;]*m|\\b)+', item)
if item.startswith('&'):
# Just use a regular expression. ('&' marks the term as a regular
# expression.)
item = item[1:]
regx_list.append(Search.search_terms_to_regex(item, case_sensitive,
word_bound=word_bound, extra_space=extra_space,
sloppy=(sloppy or '~' in item), is_regex=is_regex))
return regx_list
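# Illustrative examples of the term syntax build_highlight_regx() accepts:
# a '*' marks a partial word, and a leading '&' marks a raw regular
# expression.
#     build_highlight_regx(['creat*'], case_sensitive=False)
#     build_highlight_regx(['&\\bGod\\b'], case_sensitive=True)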
# class Verse(object):
# """ Manipulate Verse references for adding range and context.
#
# """
#
# def __init__(self, ref):
# """ Verse(ref) -> Use this reference as a base.
#
# """
#
# self._ref = ref
# self._book, chap_verse = ref.rsplit(' ', 1)
# self._chap, self._verse = chap_verse.split(':')
#
# def decrement(self, count=1):
# """ Decrement the current reference by count.
#
# """
#
# if self._chap == 1 and self._verse == 1:
# book_index = book_list.index(self._book)
# if book_index != 0:
# self._book = book_list[book_index - 1]
class StdoutRedirect(object):
""" Redirect stdout to a specified output function.
"""
def __init__(self, output_func, *args):
""" Set the output function and get the extra arguments to pass to it.
"""
self._output_func = output_func
self._args = args
self._old_stdout = sys.stdout
def write(self, data):
""" Write data to the output function.
"""
if data.strip():
self._output_func(data, *self._args)
def __enter__(self):
""" Change sys.stdout to this class.
"""
try:
sys.stdout = self
return self
except Exception as err:
print("Error in __enter__: %s" % err, file=sys.stderr)
return None
def __exit__(self, exc_type, exc_value, traceback):
""" Change sys.stdout back to its old value.
"""
try:
sys.stdout = self._old_stdout
if exc_type:
return False
return True
except Exception as err:
print("Error in __exit__: %s" % err, file=sys.stderr)
return False
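# Illustrative example: temporarily route print() output through info_print()
# (and therefore to stderr) while inside the with block.
#     with StdoutRedirect(info_print, '\n', 1):
#         print('this line goes through info_print')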
class IndexedVerseTextIter(object):
""" An iterable object for accessing verses in the Bible. Maybe it will
be easier maybe not.
"""
def __init__(self, reference_iter, strongs=False, morph=False,
module='KJV', italic_markers=False, added=True,
paragraph=True, notes=False, path=''):
""" Initialize.
"""
reg_list = []
if not strongs:
reg_list.append(r'\s*<([GH]\d+)>')
if not morph:
reg_list.append(r'\s*\{([\w-]+)\}')
if not added:
reg_list.append(r'\s?<i>\s?(.*?)\s?</i>')
if not italic_markers:
reg_list.append(r'(<i>\s?|\s?</i>)')
if not paragraph:
reg_list.append(r'\s?<p>\s?(.*?)\s?</p>')
else:
reg_list.append(r'(<p>\s?|\s?</p>)')
reg_str = r'(?:%s)' % r'|'.join(reg_list)
self._clean_regex = re.compile(reg_str, re.S)
self._notes_regex = re.compile(r'\s?<n>\s?(.*?)\s?</n>', re.S)
self._notes_str = ' (Notes: \\1)' if notes else ''
self._index_dict = IndexDict('%s' % module, path)
self._ref_iter = reference_iter
def next(self):
""" Returns the next verse reference and text.
"""
return self.__next__()
def __next__(self):
""" Returns a tuple of the next verse reference and text.
"""
# Retrieve the next reference.
verse_ref = next(self._ref_iter)
# Set the verse and render the text.
verse_text = self._get_text(verse_ref)
return (verse_ref, verse_text.strip())
def __iter__(self):
""" Returns an iterator of self.
"""
return self
def _get_text(self, verse_ref):
""" Returns the verse text. Override this to produce formatted verse
text.
"""
verse_text = self._index_dict[verse_ref]
verse_text = self._clean_regex.sub('', verse_text)
verse_text = self._notes_regex.sub(self._notes_str, verse_text)
return verse_text
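# Illustrative sketch (assumes a prebuilt KJV index in INDEX_PATH):
#     verse_iter = IndexedVerseTextIter(iter(['Genesis 1:1', 'John 3:16']),
#                                       strongs=True)
#     for ref, text in verse_iter:
#         print('%s: %s' % (ref, text))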
class IndexDbm(object):
""" A dbm database writer.
"""
def __init__(self, name=None, mode='r'):
""" Create a databse.
"""
self._dbm = dbm.open(name, mode)
def _encoding(self):
""" Figure out the encoding to use for strings.
"""
# Hopefully get the correct encoding to use.
lang, enc = locale.getlocale()
if not lang or lang == 'C':
encoding = 'ascii'
else:
encoding = enc
return encoding
def firstkey(self):
""" Return the first key.
"""
key = self._dbm.firstkey()
if key:
key = key.decode(self._encoding(), 'replace')
return key
def nextkey(self, key):
""" Returns the next key from key.
"""
key = key.encode(self._encoding(), 'replace')
return_key = self._dbm.nextkey(key)
if return_key:
return_key = return_key.decode(self._encoding(), 'replace')
return return_key
def set(self, key, value):
""" Write the list database under the given name.
"""
# Encode the buffer into bytes.
byte_buffer = json.dumps(value).encode(self._encoding(), 'replace')
        # Write the buffer to the dbm database.
self._dbm[key] = byte_buffer
return len(byte_buffer)
def get(self, key, default=[]):
""" Read the named list out of the database.
"""
try:
str_buffer = self._dbm[key].decode(self._encoding(), 'replace')
return json.loads(str_buffer)
except Exception as err:
#print("Error reading %s: %s" % (key, err), file=sys.stderr)
return default
def update(self, dic):
""" Write a dictionary to the database.
"""
for k, v in dic.items():
self.set(k, v)
return len(dic)
def read_dict(self):
""" Read out the entire dictionary.
"""
temp_dict = {}
key = self._dbm.firstkey()
while key:
temp_dict[key] = self._dbm[key]
key = self._dbm.nextkey(key)
return temp_dict
def __enter__(self):
""" Add the functionality to use pythons with statement.
"""
try:
return self
except Exception as err:
print("Error in __enter__: %s" % err, file=sys.stderr)
return None
def __exit__(self, exc_type, exc_value, traceback):
""" Close the file and exit.
"""
try:
self._dbm.close()
if exc_type:
return False
return True
except Exception as err:
print("Error in __exit__: %s" % err, file=sys.stderr)
return False
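# Illustrative example (hypothetical file name): write and read back a small
# index with IndexDbm.
#     with IndexDbm('example_index.dbm', 'c') as index:
#         index.set('beginning', ['Genesis 1:1', 'John 1:1'])
#     with IndexDbm('example_index.dbm', 'r') as index:
#         refs = index.get('beginning')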
class IndexDict(dict):
""" A Bible index container, that provides on-demand loading of indexed
items.
"""
def __init__(self, name='', path=''):
""" Initialize the index.
"""
self._non_key_text_regx = re.compile(r'[<>\{\}]')
self._name = name
self._path = path if path else INDEX_PATH
self._lower_case = self.get('lower_case', {})
super(IndexDict, self).__init__()
# In case we need to access the name externally we don't want it changed.
name = property(lambda self: self._name)
def __getitem__(self, key):
""" If a filename was given then use it to retrieve keys when
they are needed.
"""
# Cleanup Strong's and Morphology
key = self._non_key_text_regx.sub('', key).strip()
if self._name and (key not in self):
# Load the value from the database if we don't have it.
try:
dbm_name = '%s/%s_index_i.dbm' % (self._path, self._name)
with IndexDbm(dbm_name, 'r') as dbm_dict:
self[key] = dbm_dict.get(key)
except Exception as err:
print("The index is either broken or missing.", \
file=sys.stderr)
print("Please fix it. Re-build the index.", file=sys.stderr)
print("The error was: %s" % err, file=sys.stderr)
exit()
return super(IndexDict, self).__getitem__(key)
def get(self, key, default=[]):
""" Returns the value associated with key, or default.
"""
value = self[key]
return value if value else default
def keys(self):
""" Yields each key.
"""
dbm_name = '%s/%s_index_i.dbm' % (self._path, self._name)
with IndexDbm(dbm_name, 'r') as dbm_dict:
key = dbm_dict.firstkey()
while key:
yield key
key = dbm_dict.nextkey(key)
def value_intersect(self, key_list, case_sensitive=False):
""" Returns a set with only the verses that contain all the items in
search_list.
"""
# There may be a better way to do this. Start with a set of the
# verses containing the least common item, then update it with the
# intersections it has with the sets of the remaining words.
# Effectively removing any verse from the original set that does not
# contain all the other search items.
result_set = set()
for word in key_list:
temp_set = set(self[word])
# When its not a case sensitive search, combine all the references
# that contain word in any form.
if not case_sensitive:
# If word is 'the', u_word could be in ['The', 'THE'], so
# get the list of references that contain those words and
# combine them with the set of references for word.
temp_set.update(self[word.lower()])
for u_word in self._lower_case.get(word.lower(), []):
temp_set.update(self[u_word])
if result_set:
result_set.intersection_update(temp_set)
else:
result_set.update(temp_set)
return result_set
def value_sym_diff(self, key_list, case_sensitive=False):
""" Finds the symmetric difference of all the references that contain
the keys in key_list. (only a set with either or not both keys)
"""
# Create an either or set.
verse_set = set()
for item in key_list:
if not case_sensitive:
verse_set.symmetric_difference_update(self[item.lower()])
for word in self._lower_case.get(item.lower(), []):
verse_set.symmetric_difference_update(self[word])
verse_set.symmetric_difference_update(self[item])
return verse_set
def value_union(self, key_list, case_sensitive=False):
""" Returns a set with all the verses that contain each item in
search_list.
"""
# Create one big set of all the verses that contain any one or more of
# the search items.
verse_set = set()
for item in key_list:
if not case_sensitive:
verse_set.update(self[item.lower()])
for word in self._lower_case.get(item.lower(), []):
verse_set.update(self[word])
verse_set.update(self[item])
return verse_set
def from_partial(self, partial_list, case_sensitive=False,
common_limit=31103):
""" Returns a set of verses that have any the partial words in the
list.
"""
flags = re.I if not case_sensitive else 0
verse_set = set()
# Search through each word key in the index for any word that contains
# the partial word.
for word in self['_words_']:
for partial_word in partial_list:
# A Regular expression that matches any number of word
# characters for every '*' in the term.
reg_str = '\\b%s\\b' % partial_word.replace('*', '\w*')
try:
word_regx = re.compile(reg_str, flags)
except Exception as err:
print('There is a problem with the regular '
'expression %s: %s' % (reg_str, err),
file=sys.stderr)
exit()
if word_regx.match(word):
temp_list = self[word]
if len(temp_list) < common_limit:
verse_set.update(temp_list)
return verse_set
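# Illustrative sketch (assumes a prebuilt KJV index): set operations over the
# references stored for each word.
#     index = IndexDict('KJV')
#     both = index.value_intersect(['God', 'created'])   # verses with both
#     either = index.value_union(['God', 'created'])     # verses with either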
class CombinedParse(object):
""" A parser for simple combined search parsing.
((in OR tree) AND the) AND (house OR bush) =>
['in the house', 'in the bush', 'tree the house', 'tree the bush']
Also it has a NOT word list.
created NOT (and OR but) => ['created'] ['and', 'but']
"""
def __init__(self, arg_str):
""" Initialize the parser and parse the arg string.
"""
self._arg_str = arg_str
self._arg_list = arg_str.split()
parsed_list = self.parse_string(list(arg_str))
self._word_list, self._not_list = self.parse_list(parsed_list)
    # Make the results accessible via read-only properties.
word_list = property(lambda self: self._word_list)
not_list = property(lambda self: self._not_list)
def parse_list(self, arg_list):
""" Parse a list such as ['created', 'NOT', ['and', 'OR', 'but']] into
search_args = ['created'] not_list = ['and', 'but']
"""
# The list we're working on building.
working_list = []
# The list of words not to include.
not_list = []
for i in arg_list:
# Skip 'OR's
if i == 'OR':
continue
if isinstance(i, list):
# A list was found so parse it and get the results.
temp_list, temp_not_list = self.parse_list(i)
# Add the returned not list to the current not list.
not_list.extend(temp_not_list)
if working_list:
if working_list[-1] == 'AND':
# Pop the 'AND' off the end of the list.
working_list.pop()
                    # Combine each element of the working list with each
                    # element of the returned list, and replace the working
                    # list with those combinations.
# (i.e. working_list = ['this', 'that']
# temp_list = ['tree', 'house']
# result = ['this tree', 'this house',
# 'that tree', 'that house']
working_list = ['%s %s' % j \
for j in product(working_list, temp_list)]
elif working_list[-1] == 'NOT':
# Take the 'NOT' off to show we've processed it.
working_list.pop()
# Add the returned list to the NOT list.
not_list.extend(temp_list)
else:
                        # Just extend the working list with the returned list.
working_list.extend(temp_list)
else:
                    # Just extend the working list with the returned list.
working_list.extend(temp_list)
else:
if i == 'AND':
# Put the 'AND' on the list for later processing.
working_list.append(i)
elif working_list:
if working_list[-1] == 'AND':
# Take the 'AND' off the list.
working_list.pop()
# Combine all the elements of working_list with i, and
# replace working list with the resulting list.
# (i.e. working_list = ['he', 'it'] i = 'said'
# result = ['he said', 'it said']
working_list = ['%s %s' % (j, i) for j in working_list]
elif working_list[-1] == 'NOT':
# Remove the 'NOT'.
working_list.pop()
# Add the word to the not list.
not_list.append(i)
else:
# Add the word to the working list.
working_list.append(i)
else:
# Add the word to the working list.
working_list.append(i)
# Split and then combine all the strings in working_list.
# Basically removes runs of whitespace.
working_list = [' '.join(i.split()) for i in working_list]
# Return the final list and not list.
return working_list, not_list
def parse_parenthesis(self, arg_list):
""" Recursively processes strings in parenthesis converting them
to nested lists of strings.
"""
# The return list.
return_list = []
        # Temporary string.
temp_str = ''
while arg_list:
# Get the next character.
c = arg_list.pop(0)
if c == '(':
                # An opening parenthesis was found, so split the current
                # string at the spaces, putting the pieces in the return
                # list, and clear the string.
if temp_str:
return_list.extend(temp_str.split())
temp_str = ''
# Process from here to the closing parenthesis.
return_list.append(self.parse_parenthesis(arg_list))
elif c == ')':
# The parenthesis is closed so return back to the calling
# function.
break
else:
                # Append the current non-parenthesis character to the string.
temp_str += c
if temp_str:
# Split and add the string to the return list.
return_list.extend(temp_str.split())
# Return what we found.
return return_list
def parse_string(self, arg_list):
""" Parse a combined search arg string. Convert a string such as:
'created NOT (and OR but)' => ['created', 'NOT', ['and', 'OR', 'but']]
"""
# This does the same thing only using json.
#
# Regular expression to group all words.
#word_regx = re.compile(r'\b(\w*)\b')
        # Put quotes around all words and replace opening parentheses with
        # brackets, then put all of that in brackets.
#temp_str = '[%s]' % word_regx.sub('"\\1"', arg_str).replace('(', '[')
# Replace closing parenthesis with brackets and replace a '" ' with
# '", '.
#temp_str = temp_str.replace(')', ']').replace('" ', '",')
# finally replace '] ' with '], '. The end result should be a valid
# json string that can be converted to a list.
#temp_str = temp_str.replace('] ', '],')
# Convert the string to a list.
#return_list = json.loads(temp_str)
#return return_list
# The return list.
return_list = []
# Temporary string.
temp_str = ''
while arg_list:
# Pop the next character.
c = arg_list.pop(0)
if c == '(':
                # An opening parenthesis was found, so store and reset the
                # string, and parse what is inside the parenthesis.
if temp_str:
return_list.extend(temp_str.split())
temp_str = ''
return_list.append(self.parse_parenthesis(arg_list))
else:
                # Append the non-parenthesis character to the string.
temp_str += c
if temp_str:
# Store the final string in the list.
return_list.extend(temp_str.split())
#info_print(return_list)
# Return the list.
return return_list
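# Illustrative example (taken from the class docstring above):
#     CombinedParse('((in OR tree) AND the) AND (house OR bush)').word_list
#     # -> ['in the house', 'in the bush', 'tree the house', 'tree the bush']
#     CombinedParse('created NOT (and OR but)').not_list
#     # -> ['and', 'but']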
class Search(object):
""" Provides a simple way of searching an IndexDict for verses.
"""
# To check for spaces.
_whitespace_regx = re.compile(r'\s')
# Cleanup regular expressions.
_non_alnum_regx = re.compile(r'[^\w\*<>\{\}\(\)-]')
_fix_regx = re.compile(r'\s+')
# Match strongs numbers.
_strongs_regx = re.compile(r'[<]?(\b[GH]\d+\b)[>]?', re.I)
# It needs to match with braces or it will catch all capitalized
# words and words with '-'s in them.
_morph_regx = re.compile(r'[\(\{](\b[\w-]+\b)[\}\)]', re.I)
_word_regx = re.compile(r'\b([\w\\-]+)\b')
_space_regx = re.compile(r'\s+')
_non_word_regx = re.compile(r'[<>\(\)]')
_fix_strongs = classmethod(lambda c, m: '<%s>' % m.groups()[0].upper())
_fix_morph = classmethod(lambda c, m: '{%s}' % m.groups()[0].upper())
# Escape the morphological tags.
_escape_morph = classmethod(lambda c, m: \
'\{%s\}' % re.escape(m.groups()[0]).upper())
def __init__(self, module='KJV', path='', multiword=False):
""" Initialize the search.
"""
# The index dictionary.
self._index_dict = IndexDict(module, path)
self._module_name = module
self._multi = multiword
@classmethod
def search_terms_to_regex(cls, search_terms, case_sensitive,
word_bound='\\\\b', extra_space='',
sloppy=False, is_regex=False):
""" Build a regular expression from the search_terms to match a verse
in the Bible.
"""
# Set the flags for the regular expression.
flags = re.I if not case_sensitive else 0
if is_regex:
reg_str = search_terms
info_print('\nUsing regular expression: %s\n' % reg_str, tag=2)
try:
return re.compile(reg_str, flags)
except Exception as err:
print("An error occurred while compiling the highlight "
"regular expression %s: %s." % (reg_str, err),
" There will be no highlighting.\n", file=sys.stderr)
return re.compile(r'')
# This will skip words.
not_words_str = r'\b\w+\b'
# This will skip Strong's Numbers.
not_strongs_str = r'<[^>]*>'
# This will skip Morphological Tags.
not_morph_str = r'\{[^\}]*\}'
# This will skip all punctuation. Skipping ()'s is a problem for
# searching Morphological Tags, but it is necessary for the
# parenthesized words. May break highlighting.
not_punct_str = r'[\s,\?\!\.;:\\/_\(\)\[\]"\'-]'
# This will skip ansi color.
not_color_str = r'\033\[[\d;]*m'
# Match all *'s
star_regx = re.compile(r'\*')
# Hold the string that fills space between search terms.
space_str = ''
# Stash the *'s so we can replace them with '\w*' later.
temp_str, word_count = star_regx.subn(r'_star_', search_terms)
# Hack to get rid of unwanted characters.
temp_str = cls._non_alnum_regx.sub(' ', temp_str).split()
temp_str = ' '.join(temp_str)
# Phrases will have spaces in them
phrase = bool(cls._whitespace_regx.search(temp_str))
# Escape the morphological tags, and also find how many there are.
temp_str, morph_count = cls._morph_regx.subn(cls._escape_morph,
temp_str)
# Make all Strong's Numbers uppercase, also find how many there are.
temp_str, strongs_count = cls._strongs_regx.subn(cls._fix_strongs,
temp_str)
# Select all words.
#repl = '(\\\\b\\1\\\\b)'
# This works:
# temp_str, word_count = \
# cls._word_regx.subn('{0}(\\1){0}'.format(word_bound), temp_str)
repl = '({0}(\\1){0})'.format(word_bound)
temp_str, word_count = cls._word_regx.subn(repl, temp_str)
# Replace what used to be *'s with '\w*'.
temp_str = temp_str.replace('_star_', '\w*')
# All the Strong's and Morphology were changed in the previous
# substitution, so if that number is greater than the number of
# Strong's plus Morphology then there were words in the search terms.
# I do this because I don't know how to only find words.
words_found = (strongs_count + morph_count) < word_count
if phrase:
# Build the string that is inserted between the items in the
# search string.
space_str = r'(?:%s%s' % (not_punct_str, extra_space)
if not bool(strongs_count) or sloppy:
# Skip over all Strong's Numbers.
space_str = r'%s|%s' % (space_str, not_strongs_str)
if not bool(morph_count) or sloppy:
# Skip all Morphological Tags.
space_str = r'%s|%s' % (space_str, not_morph_str)
if not words_found or bool(morph_count) or bool(strongs_count) or \
sloppy:
# Skip words. If word attributes are in the search we can
# skip over words and still keep it a phrase.
space_str = r'%s|%s' % (space_str, not_words_str)
# Finally make it not greedy.
space_str = r'%s)*?' % space_str
else:
space_str = ''
# Re-combine the search terms with the regular expression string
# between each element.
reg_str = space_str.join(temp_str.split())
info_print('\nUsing regular expression: %s\n' % reg_str, tag=2)
try:
return re.compile(reg_str, flags)
except Exception as err:
print("An error occurred while compiling the highlight "
"regular expression %s: %s." % (reg_str, err),
" There will be no highlighting.\n", file=sys.stderr)
return re.compile(r'')
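# Example usage (illustrative; the exact pattern text depends on word_bound,
# extra_space and sloppy):
#
#   regx = Search.search_terms_to_regex('in the beginning', False)
#   regx.search('In the beginning God created') is not None   # expected True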
def _sorted_iter(self, verse_ref_set):
""" Returns an iterator over a sorted version of verse_ref_set.
"""
# Speed up the iteration by first sorting the range.
return iter(sorted(verse_ref_set, key=sort_key))
def _clean_text(self, text):
""" Return a clean (only alphanumeric) text of the provided string.
"""
# Do we have to use two regular expressions to do this?
# Replace all non-alphanumeric characters with a space.
temp_text = self._non_alnum_regx.sub(' ', text)
# Replace one or more spaces with one space.
clean_text = self._fix_regx.sub(' ', temp_text)
return clean_text.strip()
def _fix_strongs_morph(self, search_terms):
""" Make any Strong's or Morphology uppercase, put parenthesis around
the Morphological Tags, and put <>'s around the Strong's Numbers.
"""
# Capitalize all strongs numbers and remove the <> from them.
temp_str = self._strongs_regx.sub(self._fix_strongs, search_terms)
# Capitalize all morphological tags and make sure they are in
# parenthesis.
temp_str = self._morph_regx.sub(self._fix_morph, temp_str)
return temp_str
def _process_search(func):
""" Returns a wrapper function that processes the search terms, calls
the wrapped function, and, if applicable, confines the resulting verse
set to a range.
"""
@wraps(func)
def wrapper(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" Process the search terms according to the wrapped functions
requirements, then apply the range, if given, to the returned set
of verses.
"""
if not isinstance(search_terms, str):
# Combine the terms for use by the different methods.
search_terms = ' '.join(search_terms)
# Get a valid set of verse references that conform to the passed
# range.
range_set = {} #parse_verse_range(range_str)
if func.__name__ not in ['regex_search', 'partial_word_search']:
# Try to catch and fix any Strong's Numbers or Morphological
# Tags.
search_terms = self._fix_strongs_morph(search_terms)
# Regular expression and combined searches get the search terms as
# they were passed.
if func.__name__ in ['multiword_search', 'anyword_search',
'phrase_search', 'mixed_phrase_search']:
# Get rid of any non-alphanumeric or '-' characters from
# the search string.
search_str = self._clean_text(search_terms).strip()
if strongs or morph:
# Strong's numbers and Morphological tags are all
# uppercase. This is only required if the Morphological
# Tags were not surrounded by parenthesis.
search_str = search_str.upper().strip()
else:
search_str = search_terms
# Get the set of found verses.
found_set = func(self, search_str, strongs, morph, added,
case_sensitive, range_set)
# The phrase, regular expression, and combined searches apply the
# range before searching, so only multi-word and any-word searches
# have it applied here.
if func.__name__ in ['multiword_search', 'anyword_search',
'partial_word_search']:
if range_set:
found_set.intersection_update(range_set)
return found_set
# Return wrapper function.
return wrapper
@_process_search
def combined_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" combined_search(self, search_terms, strongs=False, morph=False,
case_sensitive=False, range_str=''): ->
Perform a combined search. Search terms could be
'created NOT (and OR but)' and it would find all verses with the word
'created' in them and remove any verse that had either 'and' or 'but.'
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for '%s'..." % search_terms, tag=1)
# Process the search_terms.
arg_parser = CombinedParse(search_terms)
# Get the list of words and/or phrases to include.
word_list = arg_parser.word_list
# Get the list of words and/or phrases to NOT include.
not_list = arg_parser.not_list
phrase_search = self.phrase_search
multiword_search = self.multiword_search
def combine_proc(str_list):
""" Performs combined search on the strings in str_list, and
returns a set of references that match.
"""
and_it = False
temp_set = set()
for word in str_list:
# A '+' before or after a word means it should have a phrase
# search done on it and the words with it.
if '+' in word:
# Do a phrase search on the word string.
result_set = phrase_search(word.replace('+', ' '), strongs,
morph, case_sensitive,
range_str)
elif word == '&':
# Combine the next search results with this one.
and_it = True
continue
else:
# Do a multi-word search on the word string.
result_set = multiword_search(word, strongs, morph,
case_sensitive, range_str)
if and_it:
# The previous word said to find verses that match both.
temp_set.intersection_update(result_set)
and_it = False
else:
# Only keep the verses that have either one group or the
# other but not both.
temp_set.symmetric_difference_update(result_set)
return temp_set
# Remove any verses that have the NOT words in them.
found_set = combine_proc(word_list).difference(combine_proc(not_list))
return found_set
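# Example usage (illustrative; assumes an index has already been built for the
# module, as required by IndexDict):
#
#   s = Search(module='KJV')
#   refs = s.combined_search('created NOT (and OR but)')
#   # refs is a set of verse references that contain 'created' and were not
#   # removed by the NOT part ('and' or 'but'), as described in the docstring.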
@_process_search
def combined_phrase_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" combined_phrase_search(self, search_terms, strongs=False,
morph=False, case_sensitive=False, range_str=''): ->
Perform a combined phrase search. Search terms could be
'created NOT (and AND but)' and it would find all verses with the word
'created' in them and remove any verse that had the phrase 'and but.'
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for '%s'..." % search_terms, tag=1)
# Process the search_terms.
arg_parser = CombinedParse(search_terms)
# Get the list of words and/or phrases to include.
word_list = arg_parser.word_list
# Get the list of words and/or phrases to NOT include.
not_list = arg_parser.not_list
phrase_search = self.phrase_search
def combine_proc(str_list):
""" Performs combined phrase search on the strings in str_list, and
returns a set of references that match.
"""
temp_set = set()
for word in str_list:
# Do a phrase search on the word string.
result_set = phrase_search(word.replace('+', ' '), strongs,
morph, case_sensitive,
range_str)
# Include all the verses that have any of the word groups.
temp_set.update(result_set)
return temp_set
# Remove any verses that have the NOT words in them.
found_set = combine_proc(word_list).difference(combine_proc(not_list))
return found_set
@_process_search
def multiword_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" multiword_search(self, search_terms, strongs=False, morph=False,
case_sensitive=False, range_str='') ->
Perform a multiword search using the search_terms.
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for verses with all these words "
"'%s'..." % ', '.join(search_terms.split()), tag=1)
# All that needs to be done is find all references with all the
# searched words in them.
found_set = self._index_dict.value_intersect(search_terms.split(),
case_sensitive)
return found_set
@_process_search
def eitheror_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" eitheror_search(self, search_terms, strongs=False, morph=False,
case_sensitive=False, range_str='') ->
Perform a search returning any verse with one and only one of the terms
searched for.
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for verses with one and not all of these words "
"'%s'..." % ', '.join(search_terms.split()), tag=1)
# Any verse with one and only one of the searched words.
found_set = self._index_dict.value_sym_diff(search_terms.split(),
case_sensitive)
return found_set
@_process_search
def anyword_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" anyword_search(self, search_terms, strongs=False, morph=False,
case_sensitive=False, range_str='') ->
Perform a search returning any verse with one or more of the search
terms.
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for verses with any of these words "
"'%s'..." % ', '.join(search_terms.split()), tag=1)
# Any verse with one or more of the searched words.
found_set = self._index_dict.value_union(search_terms.split(),
case_sensitive)
return found_set
@_process_search
def partial_word_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" partial_word_search(self, search_terms, strongs=False, morph=False,
case_sensitive=False, range_str='') ->
Perform a search returning any verse with one or more words matching
the partial words given in the search terms. Partial words are marked
with *'s (e.g. '*guil*' will match any word with 'guil' in it, such as
'guilt' or 'beguile').
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for verses with any of these partial words "
"'%s'..." % ', '.join(search_terms.split()), tag=1)
#found_set = self._index_dict.value_union(
#self._words_from_partial(search_terms, case_sensitive),
#case_sensitive)
search_list = search_terms.split()
found_set = self._index_dict.from_partial(search_list, case_sensitive)
return found_set
def _words_from_partial(self, partial_word_list, case_sensitive=False):
""" Search through a list of partial words and yield words that match.
"""
flags = re.I if not case_sensitive else 0
# Split the search terms and search through each word key in the index
# for any word that contains the partial word.
word_list = partial_word_list.split()
for word in self._index_dict['_words_']:
for partial_word in word_list:
# A Regular expression that matches any number of word
# characters for every '*' in the term.
reg_str = '\\b%s\\b' % partial_word.replace('*', '\w*')
try:
word_regx = re.compile(reg_str, flags)
except Exception as err:
print('There is a problem with the regular expression '
'%s: %s' % (reg_str, err), file=sys.stderr)
exit()
if word_regx.match(word):
yield word
def _process_phrase(func):
""" Returns a wrapper function for wrapping phrase like searches.
"""
@wraps(func)
def wrapper(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" Gets a regular expression from the wrapped function, then
builds a set of verse references to search, finally it calls the
searching function with the regular expression and the verse
reference iterator, and returns the resulting set of references.
"""
search_regx = func(self, search_terms, strongs, morph, added,
case_sensitive, range_str)
# First make sure we are only searching verses that have all the
# search terms in them.
search_list = search_terms.split()
if '*' in search_terms:
ref_set = self._index_dict.from_partial(search_list,
case_sensitive,
common_limit=5000)
else:
ref_set = self._index_dict.value_intersect(search_list,
case_sensitive)
if range_str:
# Only search through the supplied range.
ref_set.intersection_update(range_str)
# No need to search for a single word phrase.
if len(search_terms.split()) == 1:
return ref_set
# Sort the list so it may be a little faster. Only needed if we're
# using the sword module to look them up.
ref_iter = self._sorted_iter(ref_set)
# Disable Strong's and Morphological if only words are used.
strongs = bool(self._strongs_regx.search(search_terms))
morph = bool(self._morph_regx.search(search_terms))
return self.find_from_regex(ref_iter, search_regx, strongs, morph)
return wrapper
@_process_search
@_process_phrase
def ordered_multiword_search(self, search_terms, strongs=False,
morph=False, added=True, case_sensitive=False,
range_str=''):
""" ordered_multiword_search(self, search_terms, strongs=False,
morph=False, case_sensitive=False, range_str='') ->
Perform an ordered multiword search. Like a multiword search, but all
the words have to be in order.
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for verses with these words in order "
"'%s'..." % search_terms, tag=1)
return self.search_terms_to_regex(search_terms, case_sensitive,
sloppy=True)
@_process_search
@_process_phrase
def phrase_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" phrase_search(self, search_terms, strongs=False, morph=False,
case_sensitive=False, range_str='') ->
Perform a phrase search.
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for verses with this phrase "
"'%s'..." % search_terms, tag=1)
# Make all the terms the same case if case doesn't matter.
flags = re.I if not case_sensitive else 0
if strongs:
# Match strongs phrases.
search_reg_str = search_terms.replace(' ', r'[^<]*')
elif morph:
# Match morphological phrases.
search_reg_str = search_terms.replace(' ', r'[^\{]*')
else:
# Match word phrases
search_reg_str = '\\b%s\\b' % search_terms.replace(' ',
|
## @file
# parse FDF file
#
# Copyright (c) 2007 - 2010, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import re
import os
import CommonDataClass.FdfClass
##define T_CHAR_SPACE ' '
##define T_CHAR_NULL '\0'
##define T_CHAR_CR '\r'
##define T_CHAR_TAB '\t'
##define T_CHAR_LF '\n'
##define T_CHAR_SLASH '/'
##define T_CHAR_BACKSLASH '\\'
##define T_CHAR_DOUBLE_QUOTE '\"'
##define T_CHAR_SINGLE_QUOTE '\''
##define T_CHAR_STAR '*'
##define T_CHAR_HASH '#'
(T_CHAR_SPACE, T_CHAR_NULL, T_CHAR_CR, T_CHAR_TAB, T_CHAR_LF, T_CHAR_SLASH, \
T_CHAR_BACKSLASH, T_CHAR_DOUBLE_QUOTE, T_CHAR_SINGLE_QUOTE, T_CHAR_STAR, T_CHAR_HASH) = \
(' ', '\0', '\r', '\t', '\n', '/', '\\', '\"', '\'', '*', '#')
SEPERATOR_TUPLE = ('=', '|', ',', '{', '}')
IncludeFileList = []
# Macro passed from command line, which has greatest priority and can NOT be overridden by those in FDF
InputMacroDict = {}
# All Macro values when parsing file, not replace existing Macro
AllMacroList = []
def GetRealFileLine (File, Line):
InsertedLines = 0
for Profile in IncludeFileList:
if Line >= Profile.InsertStartLineNumber and Line < Profile.InsertStartLineNumber + Profile.InsertAdjust + len(Profile.FileLinesList):
return (Profile.FileName, Line - Profile.InsertStartLineNumber + 1)
if Line >= Profile.InsertStartLineNumber + Profile.InsertAdjust + len(Profile.FileLinesList):
InsertedLines += Profile.InsertAdjust + len(Profile.FileLinesList)
return (File, Line - InsertedLines)
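# Worked example (illustrative; assumes an !include spliced a 10-line inc.fdf
# into the merged buffer starting at line 6, i.e. InsertStartLineNumber == 6
# and InsertAdjust == 0):
#
#   GetRealFileLine('top.fdf', 8)   ->  ('inc.fdf', 3)    # 8 - 6 + 1
#   GetRealFileLine('top.fdf', 20)  ->  ('top.fdf', 10)   # 20 - 10 inserted lines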
## The exception class that used to report error messages when parsing FDF
#
# Currently the "ToolName" is set to be "FDF Parser".
#
class Warning (Exception):
## The constructor
#
# @param self The object pointer
# @param Str The message to record
# @param File The FDF name
# @param Line The Line number that error occurs
#
def __init__(self, Str, File = None, Line = None):
FileLineTuple = GetRealFileLine(File, Line)
self.FileName = FileLineTuple[0]
self.LineNumber = FileLineTuple[1]
self.message = Str + str(self.LineNumber)
self.ToolName = 'FDF Parser'
## The MACRO class that used to record macro value data when parsing include file
#
#
class MacroProfile :
## The constructor
#
# @param self The object pointer
# @param FileName The file that to be parsed
#
def __init__(self, FileName, Line):
self.FileName = FileName
self.DefinedAtLine = Line
self.MacroName = None
self.MacroValue = None
## The Include file content class that used to record file data when parsing include file
#
# May raise Exception when opening file.
#
class IncludeFileProfile :
## The constructor
#
# @param self The object pointer
# @param FileName The file that to be parsed
#
def __init__(self, FileName):
self.FileName = FileName
self.FileLinesList = []
try:
fsock = open(FileName, "rb", 0)
try:
self.FileLinesList = fsock.readlines()
finally:
fsock.close()
except IOError:
raise Warning("Error when opening file %s" % FileName)
self.InsertStartLineNumber = None
self.InsertAdjust = 0
## The FDF content class that used to record file data when parsing FDF
#
# May raise Exception when opening file.
#
class FileProfile :
## The constructor
#
# @param self The object pointer
# @param FileName The file that to be parsed
#
def __init__(self, FileName):
self.FileLinesList = []
try:
fsock = open(FileName, "rb", 0)
try:
self.FileLinesList = fsock.readlines()
finally:
fsock.close()
except IOError:
raise Warning("Error when opening file %s" % FileName)
self.PcdDict = {}
self.InfList = []
self.PcdFileLineDict = {}
self.InfFileLineList = []
self.FdDict = {}
self.FvDict = {}
self.CapsuleList = []
# self.VtfList = []
# self.RuleDict = {}
## The syntax parser for FDF
#
# PreprocessFile method should be called prior to ParseFile
# CycleReferenceCheck method can detect cycles in FDF contents
#
# GetNext*** procedures mean these procedures will get the next token first, then make a judgement.
# Get*** procedures mean these procedures will make a judgement on the current token only.
#
class FdfParser(object):
## The constructor
#
# @param self The object pointer
# @param FileName The file that to be parsed
#
def __init__(self, FileName):
self.Profile = FileProfile(FileName)
self.FileName = FileName
self.CurrentLineNumber = 1
self.CurrentOffsetWithinLine = 0
self.CurrentFdName = None
self.CurrentFvName = None
self.__Token = ""
self.__SkippedChars = ""
self.__WipeOffArea = []
## __IsWhiteSpace() method
#
# Whether char at current FileBufferPos is whitespace
#
# @param self The object pointer
# @param Char The char to test
# @retval True The char is a kind of white space
# @retval False The char is NOT a kind of white space
#
def __IsWhiteSpace(self, Char):
if Char in (T_CHAR_NULL, T_CHAR_CR, T_CHAR_SPACE, T_CHAR_TAB, T_CHAR_LF):
return True
else:
return False
## __SkipWhiteSpace() method
#
# Skip white spaces from current char, return number of chars skipped
#
# @param self The object pointer
# @retval Count The number of chars skipped
#
def __SkipWhiteSpace(self):
Count = 0
while not self.__EndOfFile():
Count += 1
if self.__CurrentChar() in (T_CHAR_NULL, T_CHAR_CR, T_CHAR_LF, T_CHAR_SPACE, T_CHAR_TAB):
self.__SkippedChars += str(self.__CurrentChar())
self.__GetOneChar()
else:
Count = Count - 1
return Count
## __EndOfFile() method
#
# Judge current buffer pos is at file end
#
# @param self The object pointer
# @retval True Current File buffer position is at file end
# @retval False Current File buffer position is NOT at file end
#
def __EndOfFile(self):
NumberOfLines = len(self.Profile.FileLinesList)
SizeOfLastLine = len(self.Profile.FileLinesList[-1])
if self.CurrentLineNumber == NumberOfLines and self.CurrentOffsetWithinLine >= SizeOfLastLine - 1:
return True
elif self.CurrentLineNumber > NumberOfLines:
return True
else:
return False
## __EndOfLine() method
#
# Judge current buffer pos is at line end
#
# @param self The object pointer
# @retval True Current File buffer position is at line end
# @retval False Current File buffer position is NOT at line end
#
def __EndOfLine(self):
if self.CurrentLineNumber > len(self.Profile.FileLinesList):
return True
SizeOfCurrentLine = len(self.Profile.FileLinesList[self.CurrentLineNumber - 1])
if self.CurrentOffsetWithinLine >= SizeOfCurrentLine:
return True
else:
return False
## Rewind() method
#
# Reset file data buffer to the initial state
#
# @param self The object pointer
#
def Rewind(self):
self.CurrentLineNumber = 1
self.CurrentOffsetWithinLine = 0
## __UndoOneChar() method
#
# Go back one char in the file buffer
#
# @param self The object pointer
# @retval True Successfully go back one char
# @retval False Not able to go back one char as file beginning reached
#
def __UndoOneChar(self):
if self.CurrentLineNumber == 1 and self.CurrentOffsetWithinLine == 0:
return False
elif self.CurrentOffsetWithinLine == 0:
self.CurrentLineNumber -= 1
self.CurrentOffsetWithinLine = len(self.__CurrentLine()) - 1
else:
self.CurrentOffsetWithinLine -= 1
return True
## __GetOneChar() method
#
# Move forward one char in the file buffer
#
# @param self The object pointer
#
def __GetOneChar(self):
if self.CurrentOffsetWithinLine == len(self.Profile.FileLinesList[self.CurrentLineNumber - 1]) - 1:
self.CurrentLineNumber += 1
self.CurrentOffsetWithinLine = 0
else:
self.CurrentOffsetWithinLine += 1
## __CurrentChar() method
#
# Get the char pointed to by the file buffer pointer
#
# @param self The object pointer
# @retval Char Current char
#
def __CurrentChar(self):
return self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine]
## __NextChar() method
#
# Get the one char pass the char pointed to by the file buffer pointer
#
# @param self The object pointer
# @retval Char Next char
#
def __NextChar(self):
if self.CurrentOffsetWithinLine == len(self.Profile.FileLinesList[self.CurrentLineNumber - 1]) - 1:
return self.Profile.FileLinesList[self.CurrentLineNumber][0]
else:
return self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine + 1]
## __SetCurrentCharValue() method
#
# Modify the value of current char
#
# @param self The object pointer
# @param Value The new value of current char
#
def __SetCurrentCharValue(self, Value):
self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine] = Value
## __CurrentLine() method
#
# Get the list that contains current line contents
#
# @param self The object pointer
# @retval List current line contents
#
def __CurrentLine(self):
return self.Profile.FileLinesList[self.CurrentLineNumber - 1]
def __StringToList(self):
self.Profile.FileLinesList = [list(s) for s in self.Profile.FileLinesList]
self.Profile.FileLinesList[-1].append(' ')
def __ReplaceMacros(self, Str, File, Line):
MacroEnd = 0
while Str.find('$(', MacroEnd) >= 0:
MacroStart = Str.find('$(', MacroEnd)
if Str.find(')', MacroStart) > 0:
MacroEnd = Str.find(')', MacroStart)
Name = Str[MacroStart + 2 : MacroEnd]
Value = None
if Name in InputMacroDict:
Value = InputMacroDict[Name]
else:
for Profile in AllMacroList:
if Profile.FileName == File and Profile.MacroName == Name and Profile.DefinedAtLine <= Line:
Value = Profile.MacroValue
if Value != None:
Str = Str.replace('$(' + Name + ')', Value)
MacroEnd = MacroStart + len(Value)
else:
raise Warning("Macro not complete At Line ", self.FileName, self.CurrentLineNumber)
return Str
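# Example (illustrative): macro substitution here is plain text replacement.
# Given 'DEFINE OUTPUT = Fv/FVMAIN.fv' recorded in AllMacroList (or a value
# passed in via InputMacroDict), a line such as
#
#   'FV = $(OUTPUT)'
#
# comes back as 'FV = Fv/FVMAIN.fv'. File-local DEFINEs only apply to lines at
# or after the line where they were defined.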
def __ReplaceFragment(self, StartPos, EndPos, Value = ' '):
if StartPos[0] == EndPos[0]:
Offset = StartPos[1]
while Offset <= EndPos[1]:
self.Profile.FileLinesList[StartPos[0]][Offset] = Value
Offset += 1
return
Offset = StartPos[1]
while self.Profile.FileLinesList[StartPos[0]][Offset] not in ('\r', '\n'):
self.Profile.FileLinesList[StartPos[0]][Offset] = Value
Offset += 1
Line = StartPos[0]
while Line < EndPos[0]:
Offset = 0
while self.Profile.FileLinesList[Line][Offset] not in ('\r', '\n'):
self.Profile.FileLinesList[Line][Offset] = Value
Offset += 1
Line += 1
Offset = 0
while Offset <= EndPos[1]:
self.Profile.FileLinesList[EndPos[0]][Offset] = Value
Offset += 1
def __GetMacroName(self):
if not self.__GetNextToken():
raise Warning("expected Macro name", self.FileName, self.CurrentLineNumber)
MacroName = self.__Token
NotFlag = False
if MacroName.startswith('!'):
NotFlag = True
MacroName = MacroName[1:].strip()
if not MacroName.startswith('$(') or not MacroName.endswith(')'):
raise Warning("Macro name expected(Please use '$(%(Token)s)' if '%(Token)s' is a macro.)" % {"Token" : MacroName},
self.FileName, self.CurrentLineNumber)
MacroName = MacroName[2:-1]
return MacroName, NotFlag
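# Example (illustrative): __GetMacroName() accepts the operand forms used by
# !if/!ifdef/!ifndef, for instance
#
#   '$(TARGET)'    ->  ('TARGET', False)
#   '!$(TARGET)'   ->  ('TARGET', True)    # leading '!' sets NotFlag
#
# Anything not wrapped in '$( )' raises the Warning above.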
## PreprocessFile() method
#
# Preprocess file contents, replace comments with spaces.
# In the end, rewind the file buffer pointer to the beginning
# BUGBUG: No !include statement processing contained in this procedure
# !include statement should be expanded at the same FileLinesList[CurrentLineNumber - 1]
#
# @param self The object pointer
#
def PreprocessFile(self):
self.Rewind()
InComment = False
DoubleSlashComment = False
HashComment = False
# HashComment in quoted string " " is ignored.
InString = False
while not self.__EndOfFile():
if self.__CurrentChar() == T_CHAR_DOUBLE_QUOTE and not InComment:
InString = not InString
# meet new line, then no longer in a comment for // and '#'
if self.__CurrentChar() == T_CHAR_LF:
self.CurrentLineNumber += 1
self.CurrentOffsetWithinLine = 0
if InComment and DoubleSlashComment:
InComment = False
DoubleSlashComment = False
if InComment and HashComment:
InComment = False
HashComment = False
# check for */ comment end
elif InComment and not DoubleSlashComment and not HashComment and self.__CurrentChar() == T_CHAR_STAR and self.__NextChar() == T_CHAR_SLASH:
self.__SetCurrentCharValue(T_CHAR_SPACE)
self.__GetOneChar()
self.__SetCurrentCharValue(T_CHAR_SPACE)
self.__GetOneChar()
InComment = False
# set comments to spaces
elif InComment:
self.__SetCurrentCharValue(T_CHAR_SPACE)
self.__GetOneChar()
# check for // comment
elif self.__CurrentChar() == T_CHAR_SLASH and self.__NextChar() == T_CHAR_SLASH and not self.__EndOfLine():
InComment = True
DoubleSlashComment = True
# check for '#' comment
elif self.__CurrentChar() == T_CHAR_HASH and not self.__EndOfLine() and not InString:
InComment = True
HashComment = True
# check for /* comment start
elif self.__CurrentChar() == T_CHAR_SLASH and self.__NextChar() == T_CHAR_STAR:
self.__SetCurrentCharValue( T_CHAR_SPACE)
self.__GetOneChar()
self.__SetCurrentCharValue( T_CHAR_SPACE)
self.__GetOneChar()
InComment = True
else:
self.__GetOneChar()
# restore from ListOfList to ListOfString
self.Profile.FileLinesList = ["".join(list) for list in self.Profile.FileLinesList]
self.Rewind()
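# Example (illustrative): comments are blanked character by character,
# including the '#', '//' or '/* */' markers themselves, so column offsets of
# the remaining text are preserved. A line such as
#
#   'ErasePolarity = 1   # flash erase polarity'
#
# keeps its length, but everything from '#' onwards becomes spaces.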
## PreprocessIncludeFile() method
#
# Preprocess file contents, replace !include statements with file contents.
# In the end, rewind the file buffer pointer to the beginning
#
# @param self The object pointer
#
def PreprocessIncludeFile(self):
while self.__GetNextToken():
if self.__Token == '!include':
IncludeLine = self.CurrentLineNumber
IncludeOffset = self.CurrentOffsetWithinLine - len('!include')
if not self.__GetNextToken():
raise Warning("expected include file name At Line ", self.FileName, self.CurrentLineNumber)
IncFileName = self.__Token
if not os.path.isabs(IncFileName):
if IncFileName.startswith('$(WORKSPACE)'):
Str = IncFileName.replace('$(WORKSPACE)', os.environ.get('WORKSPACE'))
if os.path.exists(Str):
if not os.path.isabs(Str):
Str = os.path.abspath(Str)
IncFileName = Str
else:
# file is in the same dir with FDF file
FullFdf = self.FileName
if not os.path.isabs(self.FileName):
FullFdf = os.path.join(os.environ.get('WORKSPACE'), self.FileName)
IncFileName = os.path.join(os.path.dirname(FullFdf), IncFileName)
if not os.path.exists(os.path.normpath(IncFileName)):
raise Warning("Include file does not exist At Line ", self.FileName, self.CurrentLineNumber)
IncFileProfile = IncludeFileProfile(os.path.normpath(IncFileName))
CurrentLine = self.CurrentLineNumber
CurrentOffset = self.CurrentOffsetWithinLine
# list index of the insertion, note that line number is 'CurrentLine + 1'
InsertAtLine = CurrentLine
IncFileProfile.InsertStartLineNumber = InsertAtLine + 1
# deal with remaining portions after "!include filename", if exists.
if self.__GetNextToken():
if self.CurrentLineNumber == CurrentLine:
RemainingLine = self.__CurrentLine()[CurrentOffset:]
self.Profile.FileLinesList.insert(self.CurrentLineNumber, RemainingLine)
IncFileProfile.InsertAdjust += 1
self.CurrentLineNumber += 1
self.CurrentOffsetWithinLine = 0
for Line in IncFileProfile.FileLinesList:
self.Profile.FileLinesList.insert(InsertAtLine, Line)
self.CurrentLineNumber += 1
InsertAtLine += 1
IncludeFileList.append(IncFileProfile)
# comment out the processed include file statement
TempList = list(self.Profile.FileLinesList[IncludeLine - 1])
TempList.insert(IncludeOffset, '#')
self.Profile.FileLinesList[IncludeLine - 1] = ''.join(TempList)
self.Rewind()
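# Example (illustrative; the path is hypothetical): a statement such as
#
#   !include Platform/Common.fdf
#
# is resolved against $(WORKSPACE) or the FDF file's own directory, its lines
# are spliced into the buffer, and the original !include statement is then
# commented out with a leading '#'. IncludeFileList keeps the bookkeeping that
# GetRealFileLine() uses to map line numbers back to the right file.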
## PreprocessConditionalStatement() method
#
# Preprocess file contents, evaluate DEFINE statements and strip conditional (!if/!ifdef/!ifndef) blocks.
# In the end, rewind the file buffer pointer to the beginning
#
# @param self The object pointer
#
def PreprocessConditionalStatement(self):
# IfList is a stack of if branches with elements of list [Pos, CondSatisfied, BranchDetermined]
IfList = []
while self.__GetNextToken():
if self.__Token == 'DEFINE':
DefineLine = self.CurrentLineNumber - 1
DefineOffset = self.CurrentOffsetWithinLine - len('DEFINE')
if not self.__GetNextToken():
raise Warning("expected Macro name At Line ", self.FileName, self.CurrentLineNumber)
Macro = self.__Token
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected value At Line ", self.FileName, self.CurrentLineNumber)
if self.__GetStringData():
pass
Value = self.__Token
if not Macro in InputMacroDict:
FileLineTuple = GetRealFileLine(self.FileName, DefineLine + 1)
MacProfile = MacroProfile(FileLineTuple[0], FileLineTuple[1])
MacProfile.MacroName = Macro
MacProfile.MacroValue = Value
AllMacroList.append(MacProfile)
self.__WipeOffArea.append(((DefineLine, DefineOffset), (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
elif self.__Token in ('!ifdef', '!ifndef', '!if'):
IfStartPos = (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - len(self.__Token))
IfList.append([IfStartPos, None, None])
CondLabel = self.__Token
MacroName, NotFlag = self.__GetMacroName()
NotDefineFlag = False
if CondLabel == '!ifndef':
NotDefineFlag = True
if CondLabel == '!ifdef' or CondLabel == '!ifndef':
if NotFlag:
raise Warning("'NOT' operation not allowed for Macro name At Line ", self.FileName, self.CurrentLineNumber)
if CondLabel == '!if':
if not self.__GetNextOp():
raise Warning("expected !endif At Line ", self.FileName, self.CurrentLineNumber)
if self.__Token in ('!=', '==', '>', '<', '>=', '<='):
Op = self.__Token
if not self.__GetNextToken():
raise Warning("expected value At Line ", self.FileName, self.CurrentLineNumber)
if self.__GetStringData():
pass
MacroValue = self.__Token
ConditionSatisfied = self.__EvaluateConditional(MacroName, IfList[-1][0][0] + 1, Op, MacroValue)
if NotFlag:
ConditionSatisfied = not ConditionSatisfied
BranchDetermined = ConditionSatisfied
else:
self.CurrentOffsetWithinLine -= len(self.__Token)
ConditionSatisfied = self.__EvaluateConditional(MacroName, IfList[-1][0][0] + 1, None, 'Bool')
if NotFlag:
ConditionSatisfied = not ConditionSatisfied
BranchDetermined = ConditionSatisfied
IfList[-1] = [IfList[-1][0], ConditionSatisfied, BranchDetermined]
if ConditionSatisfied:
self.__WipeOffArea.append((IfList[-1][0], (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
else:
ConditionSatisfied = self.__EvaluateConditional(MacroName, IfList[-1][0][0] + 1)
if NotDefineFlag:
ConditionSatisfied = not ConditionSatisfied
BranchDetermined = ConditionSatisfied
IfList[-1] = [IfList[-1][0], ConditionSatisfied, BranchDetermined]
if ConditionSatisfied:
self.__WipeOffArea.append((IfStartPos, (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
elif self.__Token in ('!elseif', '!else'):
ElseStartPos = (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - len(self.__Token))
if len(IfList) <= 0:
raise Warning("Missing !if statement At Line ", self.FileName, self.CurrentLineNumber)
if IfList[-1][1]:
IfList[-1] = [ElseStartPos, False, True]
self.__WipeOffArea.append((ElseStartPos, (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
else:
self.__WipeOffArea.append((IfList[-1][0], ElseStartPos))
IfList[-1] = [ElseStartPos, True, IfList[-1][2]]
if self.__Token == '!elseif':
MacroName, NotFlag = self.__GetMacroName()
if not self.__GetNextOp():
raise Warning("expected !endif At Line ", self.FileName, self.CurrentLineNumber)
if self.__Token in ('!=', '==', '>', '<', '>=', '<='):
Op = self.__Token
if not self.__GetNextToken():
raise Warning("expected value At Line ", self.FileName, self.CurrentLineNumber)
if self.__GetStringData():
pass
MacroValue = self.__Token
ConditionSatisfied = self.__EvaluateConditional(MacroName, IfList[-1][0][0] + 1, Op, MacroValue)
if NotFlag:
ConditionSatisfied = not ConditionSatisfied
else:
self.CurrentOffsetWithinLine -= len(self.__Token)
ConditionSatisfied = self.__EvaluateConditional(MacroName, IfList[-1][0][0] + 1, None, 'Bool')
if NotFlag:
ConditionSatisfied = not ConditionSatisfied
IfList[-1] = [IfList[-1][0], ConditionSatisfied, IfList[-1][2]]
if IfList[-1][1]:
if IfList[-1][2]:
IfList[-1][1] = False
else:
IfList[-1][2] = True
self.__WipeOffArea.append((IfList[-1][0], (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
elif self.__Token == '!endif':
if IfList[-1][1]:
self.__WipeOffArea.append(((self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - len('!endif')), (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
else:
self.__WipeOffArea.append((IfList[-1][0], (self.CurrentLineNumber - 1, self.CurrentOffsetWithinLine - 1)))
IfList.pop()
if len(IfList) > 0:
raise Warning("Missing !endif At Line ", self.FileName, self.CurrentLineNumber)
self.Rewind()
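# Example (illustrative; macro and INF names are hypothetical): this pass
# evaluates and blanks out blocks like the following:
#
#   DEFINE SECURE_BOOT_ENABLE = TRUE
#   !if $(SECURE_BOOT_ENABLE) == TRUE
#     INF SomePkg/SecureBootConfig/SecureBootConfig.inf
#   !else
#     INF SomePkg/StubConfig/StubConfig.inf
#   !endif
#
# Only the branch whose condition holds is left in the buffer; the directives
# themselves and the losing branch are overwritten with spaces.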
def __EvaluateConditional(self, Name, Line, Op = None, Value = None):
FileLineTuple = GetRealFileLine(self.FileName, Line)
if Name in InputMacroDict:
MacroValue = InputMacroDict[Name]
if Op == None:
if Value == 'Bool' and MacroValue == None or MacroValue.upper() == 'FALSE':
return False
return True
elif Op == '!=':
if Value != MacroValue:
return True
else:
return False
elif Op == '==':
if Value == MacroValue:
return True
else:
return False
else:
if (self.__IsHex(Value) or Value.isdigit()) and (self.__IsHex(MacroValue) or (MacroValue != None and MacroValue.isdigit())):
InputVal = long(Value, 0)
MacroVal = long(MacroValue, 0)
if Op == '>':
if MacroVal > InputVal:
return True
else:
return False
elif Op == '>=':
if MacroVal >= InputVal:
return True
else:
return False
elif Op == '<':
if MacroVal < InputVal:
return True
else:
return False
elif Op == '<=':
if MacroVal <= InputVal:
return True
else:
return False
else:
return False
else:
raise Warning("Value %s is not a number At Line " % Value, self.FileName, Line)
for Profile in AllMacroList:
if Profile.FileName == FileLineTuple[0] and Profile.MacroName == Name and Profile.DefinedAtLine <= FileLineTuple[1]:
if Op == None:
if Value == 'Bool' and Profile.MacroValue == None or Profile.MacroValue.upper() == 'FALSE':
return False
return True
elif Op == '!=':
if Value != Profile.MacroValue:
return True
else:
return False
elif Op == '==':
if Value == Profile.MacroValue:
return True
else:
return False
else:
if (self.__IsHex(Value) or Value.isdigit()) and (self.__IsHex(Profile.MacroValue) or (Profile.MacroValue != None and Profile.MacroValue.isdigit())):
InputVal = long(Value, 0)
MacroVal = long(Profile.MacroValue, 0)
if Op == '>':
if MacroVal > InputVal:
return True
else:
return False
elif Op == '>=':
if MacroVal >= InputVal:
return True
else:
return False
elif Op == '<':
if MacroVal < InputVal:
return True
else:
return False
elif Op == '<=':
if MacroVal <= InputVal:
return True
else:
return False
else:
return False
else:
raise Warning("Value %s is not a number At Line " % Value, self.FileName, Line)
return False
## __IsToken() method
#
# Check whether input string is found from current char position along
# If found, the string value is put into self.__Token
#
# @param self The object pointer
# @param String The string to search
# @param IgnoreCase Indicate case sensitive/non-sensitive search, default is case sensitive
# @retval True Successfully find string, file buffer pointer moved forward
# @retval False Not able to find string, file buffer pointer not changed
#
def __IsToken(self, String, IgnoreCase = False):
self.__SkipWhiteSpace()
# Only consider the same line, no multi-line token allowed
StartPos = self.CurrentOffsetWithinLine
index = -1
if IgnoreCase:
index = self.__CurrentLine()[self.CurrentOffsetWithinLine : ].upper().find(String.upper())
else:
index = self.__CurrentLine()[self.CurrentOffsetWithinLine : ].find(String)
if index == 0:
self.CurrentOffsetWithinLine += len(String)
self.__Token = self.__CurrentLine()[StartPos : self.CurrentOffsetWithinLine]
return True
return False
## __IsKeyword() method
#
# Check whether input keyword is found from current char position along, whole word only!
# If found, the string value is put into self.__Token
#
# @param self The object pointer
# @param Keyword The string to search
# @param IgnoreCase Indicate case sensitive/non-sensitive search, default is case sensitive
# @retval True Successfully find string, file buffer pointer moved forward
# @retval False Not able to find string, file buffer pointer not changed
#
def __IsKeyword(self, KeyWord, IgnoreCase = False):
self.__SkipWhiteSpace()
# Only consider the same line, no multi-line token allowed
StartPos = self.CurrentOffsetWithinLine
index = -1
if IgnoreCase:
index = self.__CurrentLine()[self.CurrentOffsetWithinLine : ].upper().find(KeyWord.upper())
else:
index = self.__CurrentLine()[self.CurrentOffsetWithinLine : ].find(KeyWord)
if index == 0:
followingChar = self.__CurrentLine()[self.CurrentOffsetWithinLine + len(KeyWord)]
if not str(followingChar).isspace() and followingChar not in SEPERATOR_TUPLE:
return False
self.CurrentOffsetWithinLine += len(KeyWord)
self.__Token = self.__CurrentLine()[StartPos : self.CurrentOffsetWithinLine]
return True
return False
## __GetNextWord() method
#
# Get next C name from file lines
# If found, the string value is put into self.__Token
#
# @param self The object pointer
# @retval True Successfully find a C name string, file buffer pointer moved forward
# @retval False Not able to find a C name string, file buffer pointer not changed
#
def __GetNextWord(self):
self.__SkipWhiteSpace()
if self.__EndOfFile():
return False
TempChar = self.__CurrentChar()
StartPos = self.CurrentOffsetWithinLine
if (TempChar >= 'a' and TempChar <= 'z') or (TempChar >= 'A' and TempChar <= 'Z') or TempChar == '_':
self.__GetOneChar()
while not self.__EndOfLine():
TempChar = self.__CurrentChar()
if (TempChar >= 'a' and TempChar <= 'z') or (TempChar >= 'A' and TempChar <= 'Z') \
or (TempChar >= '0' and TempChar <= '9') or TempChar == '_' or TempChar == '-':
self.__GetOneChar()
else:
break
self.__Token = self.__CurrentLine()[StartPos : self.CurrentOffsetWithinLine]
return True
return False
## __GetNextToken() method
#
# Get next token unit before a separator
# If found, the string value is put into self.__Token
#
# @param self The object pointer
# @retval True Successfully find a token unit, file buffer pointer moved forward
# @retval False Not able to find a token unit, file buffer pointer not changed
#
def __GetNextToken(self):
# Skip leading spaces, if exist.
self.__SkipWhiteSpace()
if self.__EndOfFile():
return False
# Record the token start position, the position of the first non-space char.
StartPos = self.CurrentOffsetWithinLine
StartLine = self.CurrentLineNumber
while not self.__EndOfLine():
TempChar = self.__CurrentChar()
# Try to find the end char that is not a space and not in the separator tuple.
# That is, when we get a space or any char in the tuple, we have reached the end of the token.
if not str(TempChar).isspace() and TempChar not in SEPERATOR_TUPLE:
self.__GetOneChar()
# if we happen to meet a separator as the first char, we must proceed to get it.
# That is, we get a token that is a separator char. Normally it is the boundary of other tokens.
elif StartPos == self.CurrentOffsetWithinLine and TempChar in SEPERATOR_TUPLE:
self.__GetOneChar()
break
else:
break
# else:
# return False
EndPos = self.CurrentOffsetWithinLine
if self.CurrentLineNumber != StartLine:
EndPos = len(self.Profile.FileLinesList[StartLine-1])
self.__Token = self.Profile.FileLinesList[StartLine-1][StartPos : EndPos]
if StartPos != self.CurrentOffsetWithinLine:
return True
else:
return False
def __GetNextOp(self):
# Skip leading spaces, if exist.
self.__SkipWhiteSpace()
if self.__EndOfFile():
return False
# Record the token start position, the position of the first non-space char.
StartPos = self.CurrentOffsetWithinLine
while not self.__EndOfLine():
TempChar = self.__CurrentChar()
# Try to find the end char that is not a space
if not str(TempChar).isspace():
self.__GetOneChar()
else:
break
else:
return False
if StartPos != self.CurrentOffsetWithinLine:
self.__Token = self.__CurrentLine()[StartPos : self.CurrentOffsetWithinLine]
return True
else:
return False
## __GetNextGuid() method
#
# Get next token unit before a separator
# If found, the GUID string is put into self.__Token
#
# @param self The object pointer
# @retval True Successfully find a registry format GUID, file buffer pointer moved forward
# @retval False Not able to find a registry format GUID, file buffer pointer not changed
#
def __GetNextGuid(self):
if not self.__GetNextToken():
return False
p = re.compile('[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}')
if p.match(self.__Token) != None:
return True
else:
self.__UndoToken()
return False
## __UndoToken() method
#
# Go back one token unit in file buffer
#
# @param self The object pointer
#
def __UndoToken(self):
self.__UndoOneChar()
while self.__CurrentChar().isspace():
if not self.__UndoOneChar():
self.__GetOneChar()
return
StartPos = self.CurrentOffsetWithinLine
CurrentLine = self.CurrentLineNumber
while CurrentLine == self.CurrentLineNumber:
TempChar = self.__CurrentChar()
# Try to find the end char that is not a space and not in the separator tuple.
# That is, when we get a space or any char in the tuple, we have reached the end of the token.
if not str(TempChar).isspace() and not TempChar in SEPERATOR_TUPLE:
if not self.__UndoOneChar():
break
# if we happen to meet a separator as the first char, we must proceed to get it.
# That is, we get a token that is a separator char. Normally it is the boundary of other tokens.
elif StartPos == self.CurrentOffsetWithinLine and TempChar in SEPERATOR_TUPLE:
return
else:
break
self.__GetOneChar()
## __HexDigit() method
#
# Whether char input is a Hex data bit
#
# @param self The object pointer
# @param TempChar The char to test
# @retval True The char is a Hex data bit
# @retval False The char is NOT a Hex data bit
#
def __HexDigit(self, TempChar):
if (TempChar >= 'a' and TempChar <= 'f') or (TempChar >= 'A' and TempChar <= 'F') \
or (TempChar >= '0' and TempChar <= '9'):
return True
else:
return False
def __IsHex(self, HexStr):
if not HexStr.upper().startswith("0X"):
return False
if len(HexStr) <= 2:
return False
charList = [c for c in HexStr[2 : ] if not self.__HexDigit( c)]
if len(charList) == 0:
return True
else:
return False
## __GetNextHexNumber() method
#
# Get next HEX data before a separator
# If found, the HEX data is put into self.__Token
#
# @param self The object pointer
# @retval True Successfully find a HEX data, file buffer pointer moved forward
# @retval False Not able to find a HEX data, file buffer pointer not changed
#
def __GetNextHexNumber(self):
if not self.__GetNextToken():
return False
if self.__IsHex(self.__Token):
return True
else:
self.__UndoToken()
return False
## __GetNextDecimalNumber() method
#
# Get next decimal data before a separator
# If found, the decimal data is put into self.__Token
#
# @param self The object pointer
# @retval True Successfully find a decimal data, file buffer pointer moved forward
# @retval False Not able to find a decimal data, file buffer pointer not changed
#
def __GetNextDecimalNumber(self):
if not self.__GetNextToken():
return False
if self.__Token.isdigit():
return True
else:
self.__UndoToken()
return False
## __GetNextPcdName() method
#
# Get next PCD token space C name and PCD C name pair before a separator
# If found, the name pair is returned as (PcdCName, PcdTokenSpaceCName)
#
# @param self The object pointer
# @retval Tuple PCD C name and PCD token space C name pair
#
def __GetNextPcdName(self):
if not self.__GetNextWord():
raise Warning("expected PcdTokenSpaceCName.PcdCName At Line ", self.FileName, self.CurrentLineNumber)
pcdTokenSpaceCName = self.__Token
if not self.__IsToken( "."):
raise Warning("expected PcdTokenSpaceCName.PcdCName At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextWord():
raise Warning("expected PcdTokenSpaceCName.PcdCName At Line ", self.FileName, self.CurrentLineNumber)
pcdCName = self.__Token
return (pcdCName, pcdTokenSpaceCName)
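# Example (illustrative; the PCD names are just examples): for a token stream
# such as 'gEfiMdeModulePkgTokenSpaceGuid.PcdFlashNvStorageVariableBase' the
# method above returns the pair
#
#   ('PcdFlashNvStorageVariableBase', 'gEfiMdeModulePkgTokenSpaceGuid')
#
# i.e. (PcdCName, PcdTokenSpaceCName), the key format used in Profile.PcdDict.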
## __GetStringData() method
#
# Get string contents quoted in ""
# If found, the string data is put into self.__Token
#
# @param self The object pointer
# @retval True Successfully find a string data, file buffer pointer moved forward
# @retval False Not able to find a string data, file buffer pointer not changed
#
def __GetStringData(self):
if self.__Token.startswith("\"") or self.__Token.startswith("L\""):
self.__UndoToken()
self.__SkipToToken("\"")
currentLineNumber = self.CurrentLineNumber
if not self.__SkipToToken("\""):
raise Warning("Missing Quote \" for String At Line ", self.FileName, self.CurrentLineNumber)
if currentLineNumber != self.CurrentLineNumber:
raise Warning("Missing Quote \" for String At Line ", self.FileName, self.CurrentLineNumber)
self.__Token = self.__SkippedChars.rstrip('\"')
return True
elif self.__Token.startswith("\'") or self.__Token.startswith("L\'"):
self.__UndoToken()
self.__SkipToToken("\'")
currentLineNumber = self.CurrentLineNumber
if not self.__SkipToToken("\'"):
raise Warning("Missing Quote \' for String At Line ", self.FileName, self.CurrentLineNumber)
if currentLineNumber != self.CurrentLineNumber:
raise Warning("Missing Quote \' for String At Line ", self.FileName, self.CurrentLineNumber)
self.__Token = self.__SkippedChars.rstrip('\'')
return True
else:
return False
## __SkipToToken() method
#
# Search forward in file buffer for the string
# The skipped chars are put into self.__SkippedChars
#
# @param self The object pointer
# @param String The string to search
# @param IgnoreCase Indicate case sensitive/non-sensitive search, default is case sensitive
# @retval True Successfully find the string, file buffer pointer moved forward
# @retval False Not able to find the string, file buffer pointer not changed
#
def __SkipToToken(self, String, IgnoreCase = False):
StartPos = self.GetFileBufferPos()
self.__SkippedChars = ""
while not self.__EndOfFile():
index = -1
if IgnoreCase:
index = self.__CurrentLine()[self.CurrentOffsetWithinLine : ].upper().find(String.upper())
else:
index = self.__CurrentLine()[self.CurrentOffsetWithinLine : ].find(String)
if index == 0:
self.CurrentOffsetWithinLine += len(String)
self.__SkippedChars += String
return True
self.__SkippedChars += str(self.__CurrentChar())
self.__GetOneChar()
self.SetFileBufferPos( StartPos)
self.__SkippedChars = ""
return False
## GetFileBufferPos() method
#
# Return the tuple of current line and offset within the line
#
# @param self The object pointer
# @retval Tuple Line number and offset pair
#
def GetFileBufferPos(self):
return (self.CurrentLineNumber, self.CurrentOffsetWithinLine)
## SetFileBufferPos() method
#
# Restore the file buffer position
#
# @param self The object pointer
# @param Pos The new file buffer position
#
def SetFileBufferPos(self, Pos):
(self.CurrentLineNumber, self.CurrentOffsetWithinLine) = Pos
## ParseFile() method
#
# Parse the file profile buffer to extract fd, fv ... information
# Exception will be raised if syntax error found
#
# @param self The object pointer
#
def ParseFile(self):
try:
self.__StringToList()
self.PreprocessFile()
self.PreprocessIncludeFile()
self.__StringToList()
self.PreprocessFile()
self.PreprocessConditionalStatement()
self.__StringToList()
for Pos in self.__WipeOffArea:
self.__ReplaceFragment(Pos[0], Pos[1])
self.Profile.FileLinesList = ["".join(list) for list in self.Profile.FileLinesList]
while self.__GetDefines():
pass
Index = 0
while Index < len(self.Profile.FileLinesList):
FileLineTuple = GetRealFileLine(self.FileName, Index + 1)
self.Profile.FileLinesList[Index] = self.__ReplaceMacros(self.Profile.FileLinesList[Index], FileLineTuple[0], FileLineTuple[1])
Index += 1
while self.__GetFd():
pass
while self.__GetFv():
pass
while self.__GetCapsule():
pass
# while self.__GetVtf():
# pass
#
# while self.__GetRule():
# pass
except Warning, X:
self.__UndoToken()
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
X.message += '\nGot Token: \"%s\" from File %s\n' % (self.__Token, FileLineTuple[0]) + \
'Previous Token: \"%s\" At line: %d, Offset Within Line: %d\n' \
% (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine :].rstrip('\n').rstrip('\r'), FileLineTuple[1], self.CurrentOffsetWithinLine)
raise
## __GetDefines() method
#
# Get Defines section contents and store its data into AllMacrosList
#
# @param self The object pointer
# @retval True Successfully find a Defines
# @retval False Not able to find a Defines
#
def __GetDefines(self):
if not self.__GetNextToken():
return False
S = self.__Token.upper()
if S.startswith("[") and not S.startswith("[DEFINES"):
if not S.startswith("[FD.") and not S.startswith("[FV.") and not S.startswith("[CAPSULE.") \
and not S.startswith("[VTF.") and not S.startswith("[RULE.") and not S.startswith("[OPTIONROM."):
raise Warning("Unknown section or section appear sequence error (The correct sequence should be [DEFINES], [FD.], [FV.], [Capsule.], [VTF.], [Rule.], [OptionRom.])", self.FileName, self.CurrentLineNumber)
self.__UndoToken()
return False
self.__UndoToken()
if not self.__IsToken("[DEFINES", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
#print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
# % (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine :], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
raise Warning("expected [DEFINES", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "]"):
raise Warning("expected ']'", self.FileName, self.CurrentLineNumber)
while self.__GetNextWord():
Macro = self.__Token
if not self.__IsToken("="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken() or self.__Token.startswith('['):
raise Warning("expected MACRO value", self.FileName, self.CurrentLineNumber)
Value = self.__Token
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
MacProfile = MacroProfile(FileLineTuple[0], FileLineTuple[1])
MacProfile.MacroName = Macro
MacProfile.MacroValue = Value
AllMacroList.append(MacProfile)
return False
## __GetFd() method
#
# Get FD section contents and store its data into FD dictionary of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a FD
# @retval False Not able to find a FD
#
def __GetFd(self):
if not self.__GetNextToken():
return False
S = self.__Token.upper()
if S.startswith("[") and not S.startswith("[FD."):
if not S.startswith("[FV.") and not S.startswith("[CAPSULE.") \
and not S.startswith("[VTF.") and not S.startswith("[RULE."):
raise Warning("Unknown section At Line ", self.FileName, self.CurrentLineNumber)
self.__UndoToken()
return False
self.__UndoToken()
if not self.__IsToken("[FD.", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
% (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine :], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
raise Warning("expected [FD.] At Line ", self.FileName, self.CurrentLineNumber)
FdName = self.__GetUiName()
self.CurrentFdName = FdName.upper()
if not self.__IsToken( "]"):
raise Warning("expected ']' At Line ", self.FileName, self.CurrentLineNumber)
FdObj = CommonDataClass.FdfClass.FDClassObject()
FdObj.FdUiName = self.CurrentFdName
self.Profile.FdDict[self.CurrentFdName] = FdObj
Status = self.__GetCreateFile(FdObj)
if not Status:
raise Warning("FD name error At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetTokenStatements(FdObj):
return False
self.__GetDefineStatements(FdObj)
self.__GetSetStatements(FdObj)
if not self.__GetRegionLayout(FdObj):
raise Warning("expected region layout At Line ", self.FileName, self.CurrentLineNumber)
while self.__GetRegionLayout(FdObj):
pass
return True
## __GetUiName() method
#
# Return the UI name of a section
#
# @param self The object pointer
# @retval FdName UI name
#
def __GetUiName(self):
FdName = ""
if self.__GetNextWord():
FdName = self.__Token
return FdName
## __GetCreateFile() method
#
# Return the output file name of object
#
# @param self The object pointer
# @param Obj object whose data will be stored in file
    # @retval True Successfully parse the CREATE_FILE statement (if any)
#
def __GetCreateFile(self, Obj):
if self.__IsKeyword( "CREATE_FILE"):
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected file name At Line ", self.FileName, self.CurrentLineNumber)
FileName = self.__Token
Obj.CreateFileName = FileName
return True
## __GetTokenStatements() method
#
# Get token statements
#
# @param self The object pointer
# @param Obj for whom token statement is got
# @retval True Successfully find a token statement
# @retval False Not able to find a token statement
#
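    # Illustrative statements parsed by this method (all values below are
    # hypothetical):
    #   BaseAddress   = 0xFFE00000
    #   Size          = 0x00200000
    #   ErasePolarity = 1
    #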
def __GetTokenStatements(self, Obj):
if not self.__IsKeyword( "BaseAddress"):
raise Warning("BaseAddress missing At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextHexNumber():
raise Warning("expected Hex base address At Line ", self.FileName, self.CurrentLineNumber)
Obj.BaseAddress = self.__Token
if self.__IsToken( "|"):
pcdPair = self.__GetNextPcdName()
Obj.BaseAddressPcd = pcdPair
self.Profile.PcdDict[pcdPair] = long(Obj.BaseAddress, 0)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[pcdPair] = FileLineTuple
if not self.__IsKeyword( "Size"):
raise Warning("Size missing At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextHexNumber():
raise Warning("expected Hex size At Line ", self.FileName, self.CurrentLineNumber)
Obj.Size = long(self.__Token, 0)
if self.__IsToken( "|"):
pcdPair = self.__GetNextPcdName()
Obj.SizePcd = pcdPair
self.Profile.PcdDict[pcdPair] = Obj.Size
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[pcdPair] = FileLineTuple
if not self.__IsKeyword( "ErasePolarity"):
raise Warning("ErasePolarity missing At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected Erase Polarity At Line ", self.FileName, self.CurrentLineNumber)
if self.__Token != "1" and self.__Token != "0":
raise Warning("expected 1 or 0 Erase Polarity At Line ", self.FileName, self.CurrentLineNumber)
Obj.ErasePolarity = self.__Token
Status = self.__GetBlockStatements(Obj)
return Status
## __GetAddressStatements() method
#
# Get address statements
#
# @param self The object pointer
# @param Obj for whom address statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def __GetAddressStatements(self, Obj):
if self.__IsKeyword("BsBaseAddress"):
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextDecimalNumber() and not self.__GetNextHexNumber():
raise Warning("expected address At Line ", self.FileName, self.CurrentLineNumber)
BsAddress = long(self.__Token, 0)
Obj.BsBaseAddress = BsAddress
if self.__IsKeyword("RtBaseAddress"):
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextDecimalNumber() and not self.__GetNextHexNumber():
raise Warning("expected address At Line ", self.FileName, self.CurrentLineNumber)
RtAddress = long(self.__Token, 0)
Obj.RtBaseAddress = RtAddress
## __GetBlockStatements() method
#
# Get block statements
#
# @param self The object pointer
# @param Obj for whom block statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def __GetBlockStatements(self, Obj):
if not self.__GetBlockStatement(Obj):
            # set the default block size to 1
Obj.BlockSizeList.append((1, Obj.Size, None))
return True
while self.__GetBlockStatement(Obj):
pass
for Item in Obj.BlockSizeList:
if Item[0] == None or Item[1] == None:
raise Warning("expected block statement for Fd Section", self.FileName, self.CurrentLineNumber)
return True
## __GetBlockStatement() method
#
# Get block statement
#
# @param self The object pointer
# @param Obj for whom block statement is got
# @retval True Successfully find
# @retval False Not able to find
#
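    # Example statement parsed by this method (the values are hypothetical):
    #   BlockSize = 0x10000
    #   NumBlocks = 0x20
    #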
def __GetBlockStatement(self, Obj):
if not self.__IsKeyword( "BlockSize"):
return False
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextHexNumber() and not self.__GetNextDecimalNumber():
raise Warning("expected Hex block size At Line ", self.FileName, self.CurrentLineNumber)
BlockSize = long(self.__Token, 0)
BlockSizePcd = None
if self.__IsToken( "|"):
PcdPair = self.__GetNextPcdName()
BlockSizePcd = PcdPair
self.Profile.PcdDict[PcdPair] = BlockSize
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
            self.Profile.PcdFileLineDict[PcdPair] = FileLineTuple
BlockNumber = None
if self.__IsKeyword( "NumBlocks"):
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextDecimalNumber() and not self.__GetNextHexNumber():
raise Warning("expected block numbers At Line ", self.FileName, self.CurrentLineNumber)
BlockNumber = long(self.__Token, 0)
Obj.BlockSizeList.append((BlockSize, BlockNumber, BlockSizePcd))
return True
## __GetDefineStatements() method
#
# Get define statements
#
# @param self The object pointer
# @param Obj for whom define statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def __GetDefineStatements(self, Obj):
while self.__GetDefineStatement( Obj):
pass
## __GetDefineStatement() method
#
# Get define statement
#
# @param self The object pointer
# @param Obj for whom define statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def __GetDefineStatement(self, Obj):
if self.__IsKeyword("DEFINE"):
self.__GetNextToken()
Macro = self.__Token
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected value At Line ", self.FileName, self.CurrentLineNumber)
Value = self.__Token
Macro = '$(' + Macro + ')'
Obj.DefineVarDict[Macro] = Value
return True
return False
## __GetSetStatements() method
#
# Get set statements
#
# @param self The object pointer
# @param Obj for whom set statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def __GetSetStatements(self, Obj):
while self.__GetSetStatement(Obj):
pass
## __GetSetStatement() method
#
# Get set statement
#
# @param self The object pointer
# @param Obj for whom set statement is got
# @retval True Successfully find
# @retval False Not able to find
#
def __GetSetStatement(self, Obj):
if self.__IsKeyword("SET"):
PcdPair = self.__GetNextPcdName()
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected value At Line ", self.FileName, self.CurrentLineNumber)
Value = self.__Token
if Value.startswith("{"):
# deal with value with {}
if not self.__SkipToToken( "}"):
raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
Value += self.__SkippedChars
Obj.SetVarDict[PcdPair] = Value
self.Profile.PcdDict[PcdPair] = Value
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[PcdPair] = FileLineTuple
return True
return False
## __GetRegionLayout() method
#
# Get region layout for FD
#
# @param self The object pointer
# @param Fd for whom region is got
# @retval True Successfully find
# @retval False Not able to find
#
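    # Example region layout parsed by this method (offset, size, PCD names and
    # FV name are hypothetical):
    #   0x00000000|0x00040000
    #   gSomeTokenSpaceGuid.PcdFlashAreaBase|gSomeTokenSpaceGuid.PcdFlashAreaSize
    #   FV = SOMEFV
    #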
def __GetRegionLayout(self, Fd):
if not self.__GetNextHexNumber():
return False
RegionObj = CommonDataClass.FdfClass.RegionClassObject()
RegionObj.Offset = long(self.__Token, 0)
Fd.RegionList.append(RegionObj)
if not self.__IsToken( "|"):
raise Warning("expected '|' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextHexNumber():
raise Warning("expected Region Size At Line ", self.FileName, self.CurrentLineNumber)
RegionObj.Size = long(self.__Token, 0)
if not self.__GetNextWord():
return True
if not self.__Token in ("SET", "FV", "FILE", "DATA", "CAPSULE"):
self.__UndoToken()
RegionObj.PcdOffset = self.__GetNextPcdName()
self.Profile.PcdDict[RegionObj.PcdOffset] = RegionObj.Offset + long(Fd.BaseAddress, 0)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[RegionObj.PcdOffset] = FileLineTuple
if self.__IsToken( "|"):
RegionObj.PcdSize = self.__GetNextPcdName()
self.Profile.PcdDict[RegionObj.PcdSize] = RegionObj.Size
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.PcdFileLineDict[RegionObj.PcdSize] = FileLineTuple
if not self.__GetNextWord():
return True
if self.__Token == "SET":
self.__UndoToken()
self.__GetSetStatements( RegionObj)
if not self.__GetNextWord():
return True
elif self.__Token == "FV":
self.__UndoToken()
self.__GetRegionFvType( RegionObj)
elif self.__Token == "CAPSULE":
self.__UndoToken()
self.__GetRegionCapType( RegionObj)
elif self.__Token == "FILE":
self.__UndoToken()
self.__GetRegionFileType( RegionObj)
else:
self.__UndoToken()
self.__GetRegionDataType( RegionObj)
return True
## __GetRegionFvType() method
#
# Get region fv data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
def __GetRegionFvType(self, RegionObj):
if not self.__IsKeyword( "FV"):
raise Warning("expected Keyword 'FV' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected FV name At Line ", self.FileName, self.CurrentLineNumber)
RegionObj.RegionType = "FV"
RegionObj.RegionDataList.append(self.__Token)
while self.__IsKeyword( "FV"):
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected FV name At Line ", self.FileName, self.CurrentLineNumber)
RegionObj.RegionDataList.append(self.__Token)
## __GetRegionCapType() method
#
# Get region capsule data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
def __GetRegionCapType(self, RegionObj):
if not self.__IsKeyword("CAPSULE"):
raise Warning("expected Keyword 'CAPSULE' at line", self.FileName, self.CurrentLineNumber)
if not self.__IsToken("="):
raise Warning("expected '=' at line", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected CAPSULE name at line", self.FileName, self.CurrentLineNumber)
RegionObj.RegionType = "CAPSULE"
RegionObj.RegionDataList.append(self.__Token)
while self.__IsKeyword("CAPSULE"):
if not self.__IsToken("="):
raise Warning("expected '=' at line", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected CAPSULE name at line", self.FileName, self.CurrentLineNumber)
RegionObj.RegionDataList.append(self.__Token)
## __GetRegionFileType() method
#
# Get region file data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
def __GetRegionFileType(self, RegionObj):
if not self.__IsKeyword( "FILE"):
raise Warning("expected Keyword 'FILE' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected File name At Line ", self.FileName, self.CurrentLineNumber)
RegionObj.RegionType = "FILE"
RegionObj.RegionDataList.append( self.__Token)
while self.__IsKeyword( "FILE"):
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected FILE name At Line ", self.FileName, self.CurrentLineNumber)
RegionObj.RegionDataList.append(self.__Token)
## __GetRegionDataType() method
#
# Get region array data for region
#
# @param self The object pointer
# @param RegionObj for whom region data is got
#
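    # Example region data parsed by this method (the byte values are
    # hypothetical):
    #   DATA = {0x5A, 0xA5, 0x00}
    #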
def __GetRegionDataType(self, RegionObj):
if not self.__IsKeyword( "DATA"):
raise Warning("expected Region Data type At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "{"):
raise Warning("expected '{' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextHexNumber():
raise Warning("expected Hex byte At Line ", self.FileName, self.CurrentLineNumber)
if len(self.__Token) > 18:
raise Warning("Hex string can't be converted to a valid UINT64 value", self.FileName, self.CurrentLineNumber)
DataString = self.__Token
DataString += ","
while self.__IsToken(","):
if not self.__GetNextHexNumber():
raise Warning("Invalid Hex number At Line ", self.FileName, self.CurrentLineNumber)
if len(self.__Token) > 4:
raise Warning("Hex byte(must be 2 digits) too long At Line ", self.FileName, self.CurrentLineNumber)
DataString += self.__Token
DataString += ","
if not self.__IsToken( "}"):
raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
DataString = DataString.rstrip(",")
RegionObj.RegionType = "DATA"
RegionObj.RegionDataList.append( DataString)
while self.__IsKeyword( "DATA"):
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "{"):
raise Warning("expected '{' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextHexNumber():
raise Warning("expected Hex byte At Line ", self.FileName, self.CurrentLineNumber)
if len(self.__Token) > 18:
raise Warning("Hex string can't be converted to a valid UINT64 value", self.FileName, self.CurrentLineNumber)
DataString = self.__Token
DataString += ","
while self.__IsToken(","):
                if not self.__GetNextHexNumber():
                    raise Warning("Invalid Hex number At Line ", self.FileName, self.CurrentLineNumber)
if len(self.__Token) > 4:
raise Warning("Hex byte(must be 2 digits) too long At Line ", self.FileName, self.CurrentLineNumber)
DataString += self.__Token
DataString += ","
if not self.__IsToken( "}"):
raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
DataString = DataString.rstrip(",")
RegionObj.RegionDataList.append( DataString)
## __GetFv() method
#
# Get FV section contents and store its data into FV dictionary of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a FV
# @retval False Not able to find a FV
#
def __GetFv(self):
if not self.__GetNextToken():
return False
S = self.__Token.upper()
if S.startswith("[") and not S.startswith("[FV."):
if not S.startswith("[CAPSULE.") \
and not S.startswith("[VTF.") and not S.startswith("[RULE."):
raise Warning("Unknown section or section appear sequence error \n(The correct sequence should be [FD.], [FV.], [Capsule.], [VTF.], [Rule.]) At Line ", self.FileName, self.CurrentLineNumber)
self.__UndoToken()
return False
self.__UndoToken()
if not self.__IsToken("[FV.", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
% (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine :], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
raise Warning("Unknown Keyword At Line ", self.FileName, self.CurrentLineNumber)
FvName = self.__GetUiName()
self.CurrentFvName = FvName.upper()
if not self.__IsToken( "]"):
raise Warning("expected ']' At Line ", self.FileName, self.CurrentLineNumber)
FvObj = CommonDataClass.FdfClass.FvClassObject()
FvObj.UiFvName = self.CurrentFvName
self.Profile.FvDict[self.CurrentFvName] = FvObj
Status = self.__GetCreateFile(FvObj)
if not Status:
raise Warning("FV name error At Line ", self.FileName, self.CurrentLineNumber)
self.__GetDefineStatements(FvObj)
self.__GetAddressStatements(FvObj)
self.__GetBlockStatement(FvObj)
self.__GetSetStatements(FvObj)
self.__GetFvAlignment(FvObj)
self.__GetFvAttributes(FvObj)
self.__GetFvNameGuid(FvObj)
self.__GetAprioriSection(FvObj, FvObj.DefineVarDict.copy())
self.__GetAprioriSection(FvObj, FvObj.DefineVarDict.copy())
while True:
isInf = self.__GetInfStatement(FvObj, MacroDict = FvObj.DefineVarDict.copy())
isFile = self.__GetFileStatement(FvObj, MacroDict = FvObj.DefineVarDict.copy())
if not isInf and not isFile:
break
return True
## __GetFvAlignment() method
#
# Get alignment for FV
#
# @param self The object pointer
# @param Obj for whom alignment is got
    # @retval True Successfully find an alignment statement
    # @retval False Not able to find an alignment statement
#
def __GetFvAlignment(self, Obj):
if not self.__IsKeyword( "FvAlignment"):
return False
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected alignment value At Line ", self.FileName, self.CurrentLineNumber)
if self.__Token.upper() not in ("1", "2", "4", "8", "16", "32", "64", "128", "256", "512", \
"1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K", \
"1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M", \
"1G", "2G"):
raise Warning("Unknown alignment value At Line ", self.FileName, self.CurrentLineNumber)
Obj.FvAlignment = self.__Token
return True
## __GetFvAttributes() method
#
# Get attributes for FV
#
# @param self The object pointer
# @param Obj for whom attribute is got
# @retval None
#
def __GetFvAttributes(self, FvObj):
while self.__GetNextWord():
name = self.__Token
if name not in ("ERASE_POLARITY", "MEMORY_MAPPED", \
"STICKY_WRITE", "LOCK_CAP", "LOCK_STATUS", "WRITE_ENABLED_CAP", \
"WRITE_DISABLED_CAP", "WRITE_STATUS", "READ_ENABLED_CAP", \
"READ_DISABLED_CAP", "READ_STATUS", "READ_LOCK_CAP", \
"READ_LOCK_STATUS", "WRITE_LOCK_CAP", "WRITE_LOCK_STATUS", \
"WRITE_POLICY_RELIABLE"):
self.__UndoToken()
return
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken() or self.__Token.upper() not in ("TRUE", "FALSE", "1", "0"):
raise Warning("expected TRUE/FALSE (1/0) At Line ", self.FileName, self.CurrentLineNumber)
FvObj.FvAttributeDict[name] = self.__Token
return
## __GetFvNameGuid() method
#
# Get FV GUID for FV
#
# @param self The object pointer
# @param Obj for whom GUID is got
# @retval None
#
def __GetFvNameGuid(self, FvObj):
if not self.__IsKeyword( "FvNameGuid"):
return
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextGuid():
raise Warning("expected FV GUID value", self.FileName, self.CurrentLineNumber)
FvObj.FvNameGuid = self.__Token
return
## __GetAprioriSection() method
#
# Get token statements
#
# @param self The object pointer
# @param FvObj for whom apriori is got
# @param MacroDict dictionary used to replace macro
# @retval True Successfully find apriori statement
# @retval False Not able to find apriori statement
#
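    # Illustrative apriori block parsed by this method (the INF path is
    # hypothetical):
    #   APRIORI PEI {
    #     INF  SomePkg/SomePeim/SomePeim.inf
    #   }
    #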
def __GetAprioriSection(self, FvObj, MacroDict = {}):
if not self.__IsKeyword( "APRIORI"):
return False
if not self.__IsKeyword("PEI") and not self.__IsKeyword("DXE"):
raise Warning("expected Apriori file type At Line ", self.FileName, self.CurrentLineNumber)
AprType = self.__Token
if not self.__IsToken( "{"):
raise Warning("expected '{' At Line ", self.FileName, self.CurrentLineNumber)
AprSectionObj = CommonDataClass.FdfClass.AprioriSectionClassObject()
AprSectionObj.AprioriType = AprType
self.__GetDefineStatements(AprSectionObj)
MacroDict.update(AprSectionObj.DefineVarDict)
while True:
IsInf = self.__GetInfStatement( AprSectionObj, MacroDict = MacroDict)
IsFile = self.__GetFileStatement( AprSectionObj)
if not IsInf and not IsFile:
break
if not self.__IsToken( "}"):
raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
FvObj.AprioriSectionList.append(AprSectionObj)
return True
## __GetInfStatement() method
#
# Get INF statements
#
# @param self The object pointer
# @param Obj for whom inf statement is got
# @param MacroDict dictionary used to replace macro
# @retval True Successfully find inf statement
# @retval False Not able to find inf statement
#
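    # Illustrative INF statements parsed by this method (the rule name and
    # paths are hypothetical):
    #   INF  SomePkg/SomeDriver/SomeDriver.inf
    #   INF  RuleOverride = SOMERULE SomePkg/SomeDriver/SomeDriver.inf
    #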
def __GetInfStatement(self, Obj, ForCapsule = False, MacroDict = {}):
if not self.__IsKeyword( "INF"):
return False
ffsInf = CommonDataClass.FdfClass.FfsInfStatementClassObject()
self.__GetInfOptions( ffsInf)
if not self.__GetNextToken():
raise Warning("expected INF file path At Line ", self.FileName, self.CurrentLineNumber)
ffsInf.InfFileName = self.__Token
# if ffsInf.InfFileName.find('$') >= 0:
# ffsInf.InfFileName = GenFdsGlobalVariable.GenFdsGlobalVariable.MacroExtend(ffsInf.InfFileName, MacroDict)
if not ffsInf.InfFileName in self.Profile.InfList:
self.Profile.InfList.append(ffsInf.InfFileName)
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
self.Profile.InfFileLineList.append(FileLineTuple)
if self.__IsToken('|'):
if self.__IsKeyword('RELOCS_STRIPPED'):
ffsInf.KeepReloc = False
elif self.__IsKeyword('RELOCS_RETAINED'):
ffsInf.KeepReloc = True
else:
raise Warning("Unknown reloc strip flag At Line ", self.FileName, self.CurrentLineNumber)
if ForCapsule:
capsuleFfs = CapsuleData.CapsuleFfs()
capsuleFfs.Ffs = ffsInf
Obj.CapsuleDataList.append(capsuleFfs)
else:
Obj.FfsList.append(ffsInf)
return True
## __GetInfOptions() method
#
# Get options for INF
#
# @param self The object pointer
# @param FfsInfObj for whom option is got
#
def __GetInfOptions(self, FfsInfObj):
if self.__IsKeyword( "RuleOverride"):
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected Rule name At Line ", self.FileName, self.CurrentLineNumber)
FfsInfObj.Rule = self.__Token
if self.__IsKeyword( "VERSION"):
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected Version At Line ", self.FileName, self.CurrentLineNumber)
if self.__GetStringData():
FfsInfObj.Version = self.__Token
if self.__IsKeyword( "UI"):
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected UI name At Line ", self.FileName, self.CurrentLineNumber)
if self.__GetStringData():
FfsInfObj.Ui = self.__Token
if self.__IsKeyword( "USE"):
if not self.__IsToken( "="):
raise Warning("expected '='", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected ARCH name", self.FileName, self.CurrentLineNumber)
FfsInfObj.UseArch = self.__Token
if self.__GetNextToken():
p = re.compile(r'([a-zA-Z0-9\-]+|\$\(TARGET\)|\*)_([a-zA-Z0-9\-]+|\$\(TOOL_CHAIN_TAG\)|\*)_([a-zA-Z0-9\-]+|\$\(ARCH\)|\*)')
if p.match(self.__Token):
FfsInfObj.KeyStringList.append(self.__Token)
if not self.__IsToken(","):
return
else:
self.__UndoToken()
return
while self.__GetNextToken():
if not p.match(self.__Token):
raise Warning("expected KeyString \"Target_Tag_Arch\" At Line ", self.FileName, self.CurrentLineNumber)
FfsInfObj.KeyStringList.append(self.__Token)
if not self.__IsToken(","):
break
## __GetFileStatement() method
#
# Get FILE statements
#
# @param self The object pointer
# @param Obj for whom FILE statement is got
# @param MacroDict dictionary used to replace macro
# @retval True Successfully find FILE statement
# @retval False Not able to find FILE statement
#
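    # Illustrative FILE statement parsed by this method (the GUID and file
    # names are hypothetical):
    #   FILE DRIVER = 12345678-1234-1234-1234-123456789ABC {
    #     SECTION PE32 = SomeDriver.efi
    #   }
    #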
def __GetFileStatement(self, Obj, ForCapsule = False, MacroDict = {}):
if not self.__IsKeyword( "FILE"):
return False
FfsFileObj = CommonDataClass.FdfClass.FileStatementClassObject()
if not self.__GetNextWord():
raise Warning("expected FFS type At Line ", self.FileName, self.CurrentLineNumber)
FfsFileObj.FvFileType = self.__Token
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextGuid():
if not self.__GetNextWord():
raise Warning("expected File GUID", self.FileName, self.CurrentLineNumber)
if self.__Token == 'PCD':
if not self.__IsToken( "("):
raise Warning("expected '('", self.FileName, self.CurrentLineNumber)
PcdPair = self.__GetNextPcdName()
if not self.__IsToken( ")"):
raise Warning("expected ')'", self.FileName, self.CurrentLineNumber)
self.__Token = 'PCD('+PcdPair[1]+'.'+PcdPair[0]+')'
FfsFileObj.NameGuid = self.__Token
self.__GetFilePart( FfsFileObj, MacroDict.copy())
if ForCapsule:
capsuleFfs = CapsuleData.CapsuleFfs()
capsuleFfs.Ffs = FfsFileObj
Obj.CapsuleDataList.append(capsuleFfs)
else:
Obj.FfsList.append(FfsFileObj)
return True
## __FileCouldHaveRelocFlag() method
#
# Check whether reloc strip flag can be set for a file type.
#
# @param self The object pointer
# @param FileType The file type to check with
# @retval True This type could have relocation strip flag
# @retval False No way to have it
#
def __FileCouldHaveRelocFlag (self, FileType):
if FileType in ('SEC', 'PEI_CORE', 'PEIM', 'PEI_DXE_COMBO'):
return True
else:
return False
## __SectionCouldHaveRelocFlag() method
#
# Check whether reloc strip flag can be set for a section type.
#
# @param self The object pointer
# @param SectionType The section type to check with
# @retval True This type could have relocation strip flag
# @retval False No way to have it
#
def __SectionCouldHaveRelocFlag (self, SectionType):
if SectionType in ('TE', 'PE32'):
return True
else:
return False
## __GetFilePart() method
#
# Get components for FILE statement
#
# @param self The object pointer
# @param FfsFileObj for whom component is got
# @param MacroDict dictionary used to replace macro
#
def __GetFilePart(self, FfsFileObj, MacroDict = {}):
self.__GetFileOpts( FfsFileObj)
if not self.__IsToken("{"):
# if self.__IsKeyword('RELOCS_STRIPPED') or self.__IsKeyword('RELOCS_RETAINED'):
# if self.__FileCouldHaveRelocFlag(FfsFileObj.FvFileType):
# if self.__Token == 'RELOCS_STRIPPED':
# FfsFileObj.KeepReloc = False
# else:
# FfsFileObj.KeepReloc = True
# else:
# raise Warning("File type %s could not have reloc strip flag At Line %d" % (FfsFileObj.FvFileType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
#
# if not self.__IsToken("{"):
raise Warning("expected '{' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected File name or section data At Line ", self.FileName, self.CurrentLineNumber)
if self.__Token == "FV":
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected FV name At Line ", self.FileName, self.CurrentLineNumber)
FfsFileObj.FvName = self.__Token
elif self.__Token == "FD":
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected FD name At Line ", self.FileName, self.CurrentLineNumber)
FfsFileObj.FdName = self.__Token
elif self.__Token in ("DEFINE", "APRIORI", "SECTION"):
self.__UndoToken()
self.__GetSectionData( FfsFileObj, MacroDict)
else:
FfsFileObj.FileName = self.__Token
if not self.__IsToken( "}"):
raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
## __GetFileOpts() method
#
# Get options for FILE statement
#
# @param self The object pointer
# @param FfsFileObj for whom options is got
#
def __GetFileOpts(self, FfsFileObj):
if self.__GetNextToken():
Pattern = re.compile(r'([a-zA-Z0-9\-]+|\$\(TARGET\)|\*)_([a-zA-Z0-9\-]+|\$\(TOOL_CHAIN_TAG\)|\*)_([a-zA-Z0-9\-]+|\$\(ARCH\)|\*)')
if Pattern.match(self.__Token):
FfsFileObj.KeyStringList.append(self.__Token)
if self.__IsToken(","):
while self.__GetNextToken():
if not Pattern.match(self.__Token):
raise Warning("expected KeyString \"Target_Tag_Arch\" At Line ", self.FileName, self.CurrentLineNumber)
FfsFileObj.KeyStringList.append(self.__Token)
if not self.__IsToken(","):
break
else:
self.__UndoToken()
if self.__IsKeyword( "FIXED", True):
FfsFileObj.Fixed = True
if self.__IsKeyword( "CHECKSUM", True):
FfsFileObj.CheckSum = True
if self.__GetAlignment():
FfsFileObj.Alignment = self.__Token
## __GetAlignment() method
#
# Return the alignment value
#
# @param self The object pointer
# @retval True Successfully find alignment
# @retval False Not able to find alignment
#
def __GetAlignment(self):
if self.__IsKeyword( "Align", True):
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected alignment value At Line ", self.FileName, self.CurrentLineNumber)
return True
return False
    ## __GetSectionData() method
#
# Get section data for FILE statement
#
# @param self The object pointer
# @param FfsFileObj for whom section is got
# @param MacroDict dictionary used to replace macro
#
def __GetSectionData(self, FfsFileObj, MacroDict = {}):
Dict = {}
Dict.update(MacroDict)
self.__GetDefineStatements(FfsFileObj)
Dict.update(FfsFileObj.DefineVarDict)
self.__GetAprioriSection(FfsFileObj, Dict.copy())
self.__GetAprioriSection(FfsFileObj, Dict.copy())
while True:
IsLeafSection = self.__GetLeafSection(FfsFileObj, Dict)
IsEncapSection = self.__GetEncapsulationSec(FfsFileObj)
if not IsLeafSection and not IsEncapSection:
break
## __GetLeafSection() method
#
# Get leaf section for Obj
#
# @param self The object pointer
# @param Obj for whom leaf section is got
# @param MacroDict dictionary used to replace macro
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
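    # Illustrative leaf sections parsed by this method (names and paths are
    # hypothetical):
    #   SECTION UI = "SomeDriver"
    #   SECTION PE32 = SomeDriver.efi
    #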
def __GetLeafSection(self, Obj, MacroDict = {}):
OldPos = self.GetFileBufferPos()
if not self.__IsKeyword( "SECTION"):
if len(Obj.SectionList) == 0:
raise Warning("expected SECTION At Line ", self.FileName, self.CurrentLineNumber)
else:
return False
AlignValue = None
if self.__GetAlignment():
if self.__Token not in ("Auto", "8", "16", "32", "64", "128", "512", "1K", "4K", "32K" ,"64K"):
raise Warning("Incorrect alignment '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
AlignValue = self.__Token
BuildNum = None
if self.__IsKeyword( "BUILD_NUM"):
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected Build number value At Line ", self.FileName, self.CurrentLineNumber)
BuildNum = self.__Token
if self.__IsKeyword( "VERSION"):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected version At Line ", self.FileName, self.CurrentLineNumber)
VerSectionObj = CommonDataClass.FdfClass.VerSectionClassObject()
VerSectionObj.Alignment = AlignValue
VerSectionObj.BuildNum = BuildNum
if self.__GetStringData():
VerSectionObj.StringData = self.__Token
else:
VerSectionObj.FileName = self.__Token
Obj.SectionList.append(VerSectionObj)
elif self.__IsKeyword( "UI"):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected UI At Line ", self.FileName, self.CurrentLineNumber)
UiSectionObj = CommonDataClass.FdfClass.UiSectionClassObject()
UiSectionObj.Alignment = AlignValue
if self.__GetStringData():
UiSectionObj.StringData = self.__Token
else:
UiSectionObj.FileName = self.__Token
Obj.SectionList.append(UiSectionObj)
elif self.__IsKeyword( "FV_IMAGE"):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextWord():
raise Warning("expected FV name At Line ", self.FileName, self.CurrentLineNumber)
FvName = self.__Token.upper()
FvObj = None
if self.__IsToken( "{"):
FvObj = Fv.FV()
FvObj.UiFvName = FvName
self.__GetDefineStatements(FvObj)
MacroDict.update(FvObj.DefineVarDict)
self.__GetBlockStatement(FvObj)
self.__GetSetStatements(FvObj)
self.__GetFvAlignment(FvObj)
self.__GetFvAttributes(FvObj)
self.__GetAprioriSection(FvObj, MacroDict.copy())
self.__GetAprioriSection(FvObj, MacroDict.copy())
while True:
                    IsInf = self.__GetInfStatement(FvObj, MacroDict = MacroDict.copy())
                    IsFile = self.__GetFileStatement(FvObj, MacroDict = MacroDict.copy())
if not IsInf and not IsFile:
break
if not self.__IsToken( "}"):
raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
FvImageSectionObj = CommonDataClass.FdfClass.FvImageSectionClassObject()
FvImageSectionObj.Alignment = AlignValue
if FvObj != None:
FvImageSectionObj.Fv = FvObj
FvImageSectionObj.FvName = None
else:
FvImageSectionObj.FvName = FvName
Obj.SectionList.append(FvImageSectionObj)
elif self.__IsKeyword("PEI_DEPEX_EXP") or self.__IsKeyword("DXE_DEPEX_EXP") or self.__IsKeyword("SMM_DEPEX_EXP"):
if AlignValue == 'Auto':
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
DepexSectionObj = CommonDataClass.FdfClass.DepexSectionClassObject()
DepexSectionObj.Alignment = AlignValue
DepexSectionObj.DepexType = self.__Token
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken( "{"):
raise Warning("expected '{' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__SkipToToken( "}"):
raise Warning("expected Depex expression ending '}' At Line ", self.FileName, self.CurrentLineNumber)
DepexSectionObj.Expression = self.__SkippedChars.rstrip('}')
Obj.SectionList.append(DepexSectionObj)
else:
if not self.__GetNextWord():
raise Warning("expected section type At Line ", self.FileName, self.CurrentLineNumber)
            # An encapsulation section (COMPRESS/GUIDED) appears here; restore the buffer position and return
if self.__Token == "COMPRESS" or self.__Token == "GUIDED":
self.SetFileBufferPos(OldPos)
return False
if self.__Token not in ("COMPAT16", "PE32", "PIC", "TE", "FV_IMAGE", "RAW", "DXE_DEPEX",\
"UI", "VERSION", "PEI_DEPEX", "SUBTYPE_GUID", "SMM_DEPEX"):
raise Warning("Unknown section type '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
            if AlignValue == 'Auto' and (not self.__Token == 'PE32') and (not self.__Token == 'TE'):
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
# DataSection
DataSectionObj = CommonDataClass.FdfClass.DataSectionClassObject()
DataSectionObj.Alignment = AlignValue
DataSectionObj.SecType = self.__Token
if self.__IsKeyword('RELOCS_STRIPPED') or self.__IsKeyword('RELOCS_RETAINED'):
if self.__FileCouldHaveRelocFlag(Obj.FvFileType) and self.__SectionCouldHaveRelocFlag(DataSectionObj.SecType):
if self.__Token == 'RELOCS_STRIPPED':
DataSectionObj.KeepReloc = False
else:
DataSectionObj.KeepReloc = True
else:
raise Warning("File type %s, section type %s, could not have reloc strip flag At Line %d" % (Obj.FvFileType, DataSectionObj.SecType, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
if self.__IsToken("="):
if not self.__GetNextToken():
raise Warning("expected section file path At Line ", self.FileName, self.CurrentLineNumber)
DataSectionObj.SectFileName = self.__Token
else:
if not self.__GetCglSection(DataSectionObj):
return False
Obj.SectionList.append(DataSectionObj)
return True
## __GetCglSection() method
#
# Get compressed or GUIDed section for Obj
#
# @param self The object pointer
# @param Obj for whom leaf section is got
# @param AlignValue alignment value for complex section
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
def __GetCglSection(self, Obj, AlignValue = None):
if self.__IsKeyword( "COMPRESS"):
type = "PI_STD"
if self.__IsKeyword("PI_STD") or self.__IsKeyword("PI_NONE"):
type = self.__Token
if not self.__IsToken("{"):
raise Warning("expected '{' At Line ", self.FileName, self.CurrentLineNumber)
CompressSectionObj = CommonDataClass.FdfClass.CompressSectionClassObject()
CompressSectionObj.Alignment = AlignValue
CompressSectionObj.CompType = type
# Recursive sections...
while True:
IsLeafSection = self.__GetLeafSection(CompressSectionObj)
IsEncapSection = self.__GetEncapsulationSec(CompressSectionObj)
if not IsLeafSection and not IsEncapSection:
break
if not self.__IsToken( "}"):
raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
Obj.SectionList.append(CompressSectionObj)
# else:
# raise Warning("Compress type not known At Line ")
return True
elif self.__IsKeyword( "GUIDED"):
GuidValue = None
if self.__GetNextGuid():
GuidValue = self.__Token
AttribDict = self.__GetGuidAttrib()
if not self.__IsToken("{"):
raise Warning("expected '{' At Line ", self.FileName, self.CurrentLineNumber)
GuidSectionObj = CommonDataClass.FdfClass.GuidSectionClassObject()
GuidSectionObj.Alignment = AlignValue
GuidSectionObj.NameGuid = GuidValue
GuidSectionObj.SectionType = "GUIDED"
GuidSectionObj.ProcessRequired = AttribDict["PROCESSING_REQUIRED"]
GuidSectionObj.AuthStatusValid = AttribDict["AUTH_STATUS_VALID"]
# Recursive sections...
while True:
IsLeafSection = self.__GetLeafSection(GuidSectionObj)
IsEncapSection = self.__GetEncapsulationSec(GuidSectionObj)
if not IsLeafSection and not IsEncapSection:
break
if not self.__IsToken( "}"):
raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
Obj.SectionList.append(GuidSectionObj)
return True
return False
    ## __GetGuidAttrib() method
#
# Get attributes for GUID section
#
# @param self The object pointer
# @retval AttribDict Dictionary of key-value pair of section attributes
#
def __GetGuidAttrib(self):
AttribDict = {}
AttribDict["PROCESSING_REQUIRED"] = False
AttribDict["AUTH_STATUS_VALID"] = False
if self.__IsKeyword("PROCESSING_REQUIRED") or self.__IsKeyword("AUTH_STATUS_VALID"):
AttribKey = self.__Token
if not self.__IsToken("="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken() or self.__Token.upper() not in ("TRUE", "FALSE", "1", "0"):
raise Warning("expected TRUE/FALSE (1/0) At Line ", self.FileName, self.CurrentLineNumber)
AttribDict[AttribKey] = self.__Token
if self.__IsKeyword("PROCESSING_REQUIRED") or self.__IsKeyword("AUTH_STATUS_VALID"):
AttribKey = self.__Token
if not self.__IsToken("="):
raise Warning("expected '=' At Line ")
if not self.__GetNextToken() or self.__Token.upper() not in ("TRUE", "FALSE", "1", "0"):
raise Warning("expected TRUE/FALSE (1/0) At Line ", self.FileName, self.CurrentLineNumber)
AttribDict[AttribKey] = self.__Token
return AttribDict
## __GetEncapsulationSec() method
#
# Get encapsulation section for FILE
#
# @param self The object pointer
# @param FfsFile for whom section is got
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
def __GetEncapsulationSec(self, FfsFileObj):
OldPos = self.GetFileBufferPos()
if not self.__IsKeyword( "SECTION"):
if len(FfsFileObj.SectionList) == 0:
raise Warning("expected SECTION At Line ", self.FileName, self.CurrentLineNumber)
else:
return False
AlignValue = None
if self.__GetAlignment():
if self.__Token not in ("8", "16", "32", "64", "128", "512", "1K", "4K", "32K" ,"64K"):
raise Warning("Incorrect alignment '%s'" % self.__Token, self.FileName, self.CurrentLineNumber)
AlignValue = self.__Token
if not self.__GetCglSection(FfsFileObj, AlignValue):
self.SetFileBufferPos(OldPos)
return False
else:
return True
## __GetCapsule() method
#
# Get capsule section contents and store its data into capsule list of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a capsule
# @retval False Not able to find a capsule
#
def __GetCapsule(self):
if not self.__GetNextToken():
return False
S = self.__Token.upper()
if S.startswith("[") and not S.startswith("[CAPSULE."):
if not S.startswith("[VTF.") and not S.startswith("[RULE.") and not S.startswith("[OPTIONROM."):
raise Warning("Unknown section or section appear sequence error (The correct sequence should be [FD.], [FV.], [Capsule.], [VTF.], [Rule.], [OptionRom.])", self.FileName, self.CurrentLineNumber)
self.__UndoToken()
return False
self.__UndoToken()
if not self.__IsToken("[CAPSULE.", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
% (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine :], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
raise Warning("expected [Capsule.] At Line ", self.FileName, self.CurrentLineNumber)
CapsuleObj = CommonDataClass.FdfClass.CapsuleClassObject()
CapsuleName = self.__GetUiName()
if not CapsuleName:
raise Warning("expected capsule name At line ", self.FileName, self.CurrentLineNumber)
CapsuleObj.UiCapsuleName = CapsuleName.upper()
if not self.__IsToken( "]"):
raise Warning("expected ']' At Line ", self.FileName, self.CurrentLineNumber)
if self.__IsKeyword("CREATE_FILE"):
if not self.__IsToken( "="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected file name At Line ", self.FileName, self.CurrentLineNumber)
CapsuleObj.CreateFile = self.__Token
self.__GetCapsuleStatements(CapsuleObj)
self.Profile.CapsuleList.append(CapsuleObj)
return True
## __GetCapsuleStatements() method
#
# Get statements for capsule
#
# @param self The object pointer
# @param Obj for whom statements are got
#
def __GetCapsuleStatements(self, Obj):
self.__GetCapsuleTokens(Obj)
self.__GetDefineStatements(Obj)
self.__GetSetStatements(Obj)
self.__GetCapsuleData(Obj)
    ## __GetCapsuleTokens() method
#
# Get token statements for capsule
#
# @param self The object pointer
# @param Obj for whom token statements are got
#
def __GetCapsuleTokens(self, Obj):
if not self.__IsKeyword("CAPSULE_GUID"):
raise Warning("expected 'CAPSULE_GUID' At Line ", self.FileName, self.CurrentLineNumber)
while self.__CurrentLine().find("=") != -1:
NameValue = self.__CurrentLine().split("=")
Obj.TokensDict[NameValue[0].strip()] = NameValue[1].strip()
self.CurrentLineNumber += 1
self.CurrentOffsetWithinLine = 0
## __GetCapsuleData() method
#
# Get capsule data for capsule
#
# @param self The object pointer
# @param Obj for whom capsule data are got
#
def __GetCapsuleData(self, Obj):
while True:
IsInf = self.__GetInfStatement(Obj, True)
IsFile = self.__GetFileStatement(Obj, True)
IsFv = self.__GetFvStatement(Obj)
if not IsInf and not IsFile and not IsFv:
break
## __GetFvStatement() method
#
# Get FV for capsule
#
# @param self The object pointer
# @param CapsuleObj for whom FV is got
# @retval True Successfully find a FV statement
# @retval False Not able to find a FV statement
#
def __GetFvStatement(self, CapsuleObj):
if not self.__IsKeyword("FV"):
return False
if not self.__IsToken("="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextToken():
raise Warning("expected FV name At Line ", self.FileName, self.CurrentLineNumber)
# CapsuleFv = CapsuleData.CapsuleFv()
# CapsuleFv.FvName = self.__Token
# CapsuleObj.CapsuleDataList.append(CapsuleFv)
return True
## __GetRule() method
#
# Get Rule section contents and store its data into rule list of self.Profile
#
# @param self The object pointer
# @retval True Successfully find a Rule
# @retval False Not able to find a Rule
#
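    # Illustrative rule section header parsed by this method (the template
    # name is hypothetical):
    #   [Rule.Common.DXE_DRIVER.SOMETEMPLATE]
    #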
def __GetRule(self):
if not self.__GetNextToken():
return False
S = self.__Token.upper()
if S.startswith("[") and not S.startswith("[RULE."):
if not S.startswith("[OPTIONROM."):
raise Warning("Unknown section or section appear sequence error (The correct sequence should be [FD.], [FV.], [Capsule.], [VTF.], [Rule.], [OptionRom.])", self.FileName, self.CurrentLineNumber)
self.__UndoToken()
return False
self.__UndoToken()
if not self.__IsToken("[Rule.", True):
FileLineTuple = GetRealFileLine(self.FileName, self.CurrentLineNumber)
print 'Parsing String: %s in File %s, At line: %d, Offset Within Line: %d' \
% (self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine :], FileLineTuple[0], FileLineTuple[1], self.CurrentOffsetWithinLine)
raise Warning("expected [Rule.] At Line ", self.FileName, self.CurrentLineNumber)
if not self.__SkipToToken("."):
raise Warning("expected '.' At Line ", self.FileName, self.CurrentLineNumber)
Arch = self.__SkippedChars.rstrip(".")
if Arch.upper() not in ("IA32", "X64", "IPF", "EBC", "ARM", "COMMON"):
raise Warning("Unknown Arch '%s'" % Arch, self.FileName, self.CurrentLineNumber)
ModuleType = self.__GetModuleType()
TemplateName = ""
if self.__IsToken("."):
if not self.__GetNextWord():
raise Warning("expected template name At Line ", self.FileName, self.CurrentLineNumber)
TemplateName = self.__Token
if not self.__IsToken( "]"):
raise Warning("expected ']' At Line ", self.FileName, self.CurrentLineNumber)
RuleObj = self.__GetRuleFileStatements()
RuleObj.Arch = Arch.upper()
RuleObj.ModuleType = ModuleType
RuleObj.TemplateName = TemplateName
if TemplateName == '' :
self.Profile.RuleDict['RULE' + \
'.' + \
Arch.upper() + \
'.' + \
ModuleType.upper() ] = RuleObj
else :
self.Profile.RuleDict['RULE' + \
'.' + \
Arch.upper() + \
'.' + \
ModuleType.upper() + \
'.' + \
TemplateName.upper() ] = RuleObj
# self.Profile.RuleList.append(rule)
return True
## __GetModuleType() method
#
# Return the module type
#
# @param self The object pointer
# @retval string module type
#
def __GetModuleType(self):
if not self.__GetNextWord():
raise Warning("expected Module type At Line ", self.FileName, self.CurrentLineNumber)
if self.__Token.upper() not in ("SEC", "PEI_CORE", "PEIM", "DXE_CORE", \
"DXE_DRIVER", "DXE_SAL_DRIVER", \
"DXE_SMM_DRIVER", "DXE_RUNTIME_DRIVER", \
"UEFI_DRIVER", "UEFI_APPLICATION", "USER_DEFINED", "DEFAULT", "BASE", \
"SECURITY_CORE", "COMBINED_PEIM_DRIVER", "PIC_PEIM", "RELOCATABLE_PEIM", \
"PE32_PEIM", "BS_DRIVER", "RT_DRIVER", "SAL_RT_DRIVER", "APPLICATION", "ACPITABLE", "SMM_CORE"):
raise Warning("Unknown Module type At line ", self.FileName, self.CurrentLineNumber)
return self.__Token
## __GetFileExtension() method
#
# Return the file extension
#
# @param self The object pointer
# @retval string file name extension
#
def __GetFileExtension(self):
if not self.__IsToken("."):
raise Warning("expected '.' At Line ", self.FileName, self.CurrentLineNumber)
Ext = ""
if self.__GetNextToken():
Pattern = re.compile(r'([a-zA-Z][a-zA-Z0-9]*)')
if Pattern.match(self.__Token):
Ext = self.__Token
return '.' + Ext
else:
raise Warning("Unknown file extension At Line ", self.FileName, self.CurrentLineNumber)
else:
raise Warning("expected file extension At Line ", self.FileName, self.CurrentLineNumber)
    ## __GetRuleFileStatements() method
#
# Get rule contents
#
# @param self The object pointer
# @retval Rule Rule object
#
def __GetRuleFileStatements(self):
if not self.__IsKeyword("FILE"):
raise Warning("expected FILE At Line ", self.FileName, self.CurrentLineNumber)
if not self.__GetNextWord():
raise Warning("expected FFS type At Line ", self.FileName, self.CurrentLineNumber)
Type = self.__Token.strip().upper()
if Type not in ("RAW", "FREEFORM", "SEC", "PEI_CORE", "PEIM",\
"PEI_DXE_COMBO", "DRIVER", "DXE_CORE", "APPLICATION", "FV_IMAGE", "SMM", "SMM_CORE"):
raise Warning("Unknown FV type At line ", self.FileName, self.CurrentLineNumber)
if not self.__IsToken("="):
raise Warning("expected '=' At Line ", self.FileName, self.CurrentLineNumber)
if not self.__IsKeyword("$(NAMED_GUID)"):
if not self.__GetNextWord():
raise Warning("expected $(NAMED_GUID)", self.FileName, self.CurrentLineNumber)
if self.__Token == 'PCD':
if not self.__IsToken( "("):
raise Warning("expected '('", self.FileName, self.CurrentLineNumber)
PcdPair = self.__GetNextPcdName()
if not self.__IsToken( ")"):
raise Warning("expected ')'", self.FileName, self.CurrentLineNumber)
self.__Token = 'PCD('+PcdPair[1]+'.'+PcdPair[0]+')'
NameGuid = self.__Token
KeepReloc = None
if self.__IsKeyword('RELOCS_STRIPPED') or self.__IsKeyword('RELOCS_RETAINED'):
if self.__FileCouldHaveRelocFlag(Type):
if self.__Token == 'RELOCS_STRIPPED':
KeepReloc = False
else:
KeepReloc = True
else:
raise Warning("File type %s could not have reloc strip flag At Line %d" % (Type, self.CurrentLineNumber), self.FileName, self.CurrentLineNumber)
KeyStringList = []
if self.__GetNextToken():
Pattern = re.compile(r'([a-zA-Z0-9\-]+|\$\(TARGET\)|\*)_([a-zA-Z0-9\-]+|\$\(TOOL_CHAIN_TAG\)|\*)_([a-zA-Z0-9\-]+|\$\(ARCH\)|\*)')
if Pattern.match(self.__Token):
KeyStringList.append(self.__Token)
if self.__IsToken(","):
while self.__GetNextToken():
if not Pattern.match(self.__Token):
raise Warning("expected KeyString \"Target_Tag_Arch\" At Line ", self.FileName, self.CurrentLineNumber)
KeyStringList.append(self.__Token)
if not self.__IsToken(","):
break
else:
self.__UndoToken()
Fixed = False
if self.__IsKeyword("Fixed", True):
Fixed = True
CheckSum = False
if self.__IsKeyword("CheckSum", True):
CheckSum = True
AlignValue = ""
if self.__GetAlignment():
if self.__Token not in ("Auto", "8", "16", "32", "64", "128", "512", "1K", "4K", "32K" ,"64K"):
raise Warning("Incorrect alignment At Line ", self.FileName, self.CurrentLineNumber)
AlignValue = self.__Token
if self.__IsToken("{"):
# Complex file rule expected
Rule = RuleComplexFile.RuleComplexFile()
Rule.FvFileType = Type
Rule.NameGuid = NameGuid
Rule.Alignment = AlignValue
Rule.CheckSum = CheckSum
Rule.Fixed = Fixed
Rule.KeyStringList = KeyStringList
if KeepReloc != None:
Rule.KeepReloc = KeepReloc
while True:
IsEncapsulate = self.__GetRuleEncapsulationSection(Rule)
IsLeaf = self.__GetEfiSection(Rule)
if not IsEncapsulate and not IsLeaf:
break
if not self.__IsToken("}"):
raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
return Rule
elif self.__IsToken("|"):
# Ext rule expected
Ext = self.__GetFileExtension()
Rule = RuleSimpleFile.RuleSimpleFile()
Rule.FvFileType = Type
Rule.NameGuid = NameGuid
Rule.Alignment = AlignValue
Rule.CheckSum = CheckSum
Rule.Fixed = Fixed
Rule.FileExtension = Ext
Rule.KeyStringList = KeyStringList
if KeepReloc != None:
Rule.KeepReloc = KeepReloc
return Rule
else:
# Simple file rule expected
if not self.__GetNextWord():
raise Warning("expected leaf section type At Line ", self.FileName, self.CurrentLineNumber)
SectionName = self.__Token
if SectionName not in ("COMPAT16", "PE32", "PIC", "TE", "FV_IMAGE", "RAW", "DXE_DEPEX",\
"UI", "PEI_DEPEX", "VERSION", "SUBTYPE_GUID", "SMM_DEPEX"):
raise Warning("Unknown leaf section name '%s'" % SectionName, self.FileName, self.CurrentLineNumber)
if self.__IsKeyword("Fixed", True):
Fixed = True
if self.__IsKeyword("CheckSum", True):
CheckSum = True
if self.__GetAlignment():
if self.__Token not in ("Auto", "8", "16", "32", "64", "128", "512", "1K", "4K", "32K" ,"64K"):
raise Warning("Incorrect alignment At Line ", self.FileName, self.CurrentLineNumber)
if self.__Token == 'Auto' and (not SectionName == 'PE32') and (not SectionName == 'TE'):
raise Warning("Auto alignment can only be used in PE32 or TE section ", self.FileName, self.CurrentLineNumber)
AlignValue = self.__Token
if not self.__GetNextToken():
raise Warning("expected File name At Line ", self.FileName, self.CurrentLineNumber)
Rule = RuleSimpleFile.RuleSimpleFile()
Rule.SectionType = SectionName
Rule.FvFileType = Type
Rule.NameGuid = NameGuid
Rule.Alignment = AlignValue
Rule.CheckSum = CheckSum
Rule.Fixed = Fixed
Rule.FileName = self.__Token
Rule.KeyStringList = KeyStringList
if KeepReloc != None:
Rule.KeepReloc = KeepReloc
return Rule
## __GetEfiSection() method
#
# Get section list for Rule
#
# @param self The object pointer
# @param Obj for whom section is got
# @retval True Successfully find section statement
# @retval False Not able to find section statement
#
def __GetEfiSection(self, Obj):
OldPos = self.GetFileBufferPos()
if not self.__GetNextWord():
return False
SectionName = self.__Token
if SectionName not in ("COMPAT16", "PE32", "PIC", "TE", "FV_IMAGE", "RAW", "DXE_DEPEX",\
"UI", "VERSION", "PEI_DEPEX", "GUID", "SMM_DEPEX"):
self.__UndoToken()
return False
if SectionName == "FV_IMAGE":
FvImageSectionObj = FvImageSection.FvImageSection()
if self.__IsKeyword("FV_IMAGE"):
pass
if self.__IsToken( "{"):
FvObj = Fv.FV()
self.__GetDefineStatements(FvObj)
self.__GetBlockStatement(FvObj)
self.__GetSetStatements(FvObj)
self.__GetFvAlignment(FvObj)
self.__GetFvAttributes(FvObj)
self.__GetAprioriSection(FvObj)
self.__GetAprioriSection(FvObj)
while True:
IsInf = self.__GetInfStatement(FvObj)
IsFile = self.__GetFileStatement(FvObj)
if not IsInf and not IsFile:
break
if not self.__IsToken( "}"):
raise Warning("expected '}' At Line ", self.FileName, self.CurrentLineNumber)
FvImageSectionObj.Fv = FvObj
FvImageSectionObj.FvName = None
            else:
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode project file generator.
This module is both an Xcode project file generator and a documentation of the
Xcode project file format. Knowledge of the project file format was gained
based on extensive experience with Xcode, and by making changes to projects in
Xcode.app and observing the resultant changes in the associated project files.
XCODE PROJECT FILES
The generator targets the file format as written by Xcode 3.2 (specifically,
3.2.6), but past experience has taught that the format has not changed
significantly in the past several years, and future versions of Xcode are able
to read older project files.
Xcode project files are "bundled": the project "file" from an end-user's
perspective is actually a directory with an ".xcodeproj" extension. The
project file from this module's perspective is actually a file inside this
directory, always named "project.pbxproj". This file contains a complete
description of the project and is all that is needed to use the xcodeproj.
Other files contained in the xcodeproj directory are simply used to store
per-user settings, such as the state of various UI elements in the Xcode
application.
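For illustration only (the project name here is made up), the on-disk layout
is roughly:
  MyProject.xcodeproj/
    project.pbxproj      <- everything this module generates
    (per-user UI state files)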
The project.pbxproj file is a property list, stored in a format almost
identical to the NeXTstep property list format. The file is able to carry
Unicode data, and is encoded in UTF-8. The root element in the property list
is a dictionary that contains several properties of minimal interest, and two
properties of immense interest. The most important property is a dictionary
named "objects". The entire structure of the project is represented by the
children of this property. The objects dictionary is keyed by unique 96-bit
values represented by 24 uppercase hexadecimal characters. Each value in the
objects dictionary is itself a dictionary, describing an individual object.
Each object in the dictionary is a member of a class, which is identified by
the "isa" property of each object. A variety of classes are represented in a
project file. Objects can refer to other objects by ID, using the 24-character
hexadecimal object key. A project's objects form a tree, with a root object
of class PBXProject at the root. As an example, the PBXProject object serves
as parent to an XCConfigurationList object defining the build configurations
used in the project, a PBXGroup object serving as a container for all files
referenced in the project, and a list of target objects, each of which defines
a target in the project. There are several different types of target object,
such as PBXNativeTarget and PBXAggregateTarget. In this module, this
relationship is expressed by having each target type derive from an abstract
base named XCTarget.
The project.pbxproj file's root dictionary also contains a property, sibling to
the "objects" dictionary, named "rootObject". The value of rootObject is a
24-character object key referring to the root PBXProject object in the
objects dictionary.
In Xcode, every file used as input to a target or produced as a final product
of a target must appear somewhere in the hierarchy rooted at the PBXGroup
object referenced by the PBXProject's mainGroup property. A PBXGroup is
generally represented as a folder in the Xcode application. PBXGroups can
contain other PBXGroups as well as PBXFileReferences, which are pointers to
actual files.
Each XCTarget contains a list of build phases, represented in this module by
the abstract base XCBuildPhase. Examples of concrete XCBuildPhase derivations
are PBXSourcesBuildPhase and PBXFrameworksBuildPhase, which correspond to the
"Compile Sources" and "Link Binary With Libraries" phases displayed in the
Xcode application. Files used as input to these phases (for example, source
files in the former case and libraries and frameworks in the latter) are
represented by PBXBuildFile objects, referenced by elements of "files" lists
in XCTarget objects. Each PBXBuildFile object refers to a PBXFileReference
object as a "weak" reference: it does not "own" the PBXFileReference, which is
owned by the root object's mainGroup or a descendant group. In most cases, the
layer of indirection between an XCBuildPhase and a PBXFileReference via a
PBXBuildFile appears extraneous, but there's actually one reason for this:
file-specific compiler flags are added to the PBXBuildFile object so as to
allow a single file to be a member of multiple targets while having distinct
compiler flags for each. These flags can be modified in the Xcode application
in the "Build" tab of a File Info window.
When a project is open in the Xcode application, Xcode will rewrite it. As
such, this module is careful to adhere to the formatting used by Xcode, to
avoid insignificant changes appearing in the file when it is used in the
Xcode application. This will keep version control repositories happy, and
makes it possible to compare a project file used in Xcode to one generated by
this module to determine if any significant changes were made in the
application.
Xcode has its own way of assigning 24-character identifiers to each object,
which is not duplicated here. Because the identifier is only generated
once, when an object is created, and is then left unchanged, there is no need
to attempt to duplicate Xcode's behavior in this area. The generator is free
to select any identifier, even at random, to refer to the objects it creates,
and Xcode will retain those identifiers and use them when subsequently
rewriting the project file. However, the generator would choose new random
identifiers each time the project files are generated, leading to difficulties
comparing "used" project files to "pristine" ones produced by this module,
and causing the appearance of changes as every object identifier is changed
when updated projects are checked in to a version control repository. To
mitigate this problem, this module chooses identifiers in a more deterministic
way, by hashing a description of each object as well as its parent and ancestor
objects. This strategy should result in minimal "shift" in IDs as successive
generations of project files are produced.
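As an illustrative sketch only (the real logic lives in XCObject.ComputeIDs,
which also mixes in each hashable's length and the hashables of ancestor
objects), a 24-character identifier can be folded out of a SHA-1 digest:

  import hashlib
  import struct

  digest = hashlib.sha1('PBXFileReference' + 'main.cc').digest()
  words = struct.unpack('>5I', digest)  # 160 bits as five 32-bit words
  id_ints = [0, 0, 0]
  for index, word in enumerate(words):
    id_ints[index % 3] ^= word          # xor-fold 160 bits down to 96
  object_id = '%08X%08X%08X' % tuple(id_ints)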
THIS MODULE
This module introduces several classes, all derived from the XCObject class.
Nearly all of the "brains" are built into the XCObject class, which understands
how to create and modify objects, maintain the proper tree structure, compute
identifiers, and print objects. For the most part, classes derived from
XCObject need only provide a _schema class object, a dictionary that
expresses what properties objects of the class may contain.
Given this structure, it's possible to build a minimal project file by creating
objects of the appropriate types and making the proper connections:
config_list = XCConfigurationList()
group = PBXGroup()
project = PBXProject({'buildConfigurationList': config_list,
'mainGroup': group})
With the project object set up, it can be added to an XCProjectFile object.
XCProjectFile is a pseudo-class in the sense that it is a concrete XCObject
subclass that does not actually correspond to a class type found in a project
file. Rather, it is used to represent the project file's root dictionary.
Printing an XCProjectFile will print the entire project file, including the
full "objects" dictionary.
project_file = XCProjectFile({'rootObject': project})
project_file.ComputeIDs()
project_file.Print()
Xcode project files are always encoded in UTF-8. This module will accept
strings of either the str class or the unicode class. Strings of class str
are assumed to already be encoded in UTF-8. Obviously, if you're just using
ASCII, you won't encounter difficulties because ASCII is a UTF-8 subset.
Strings of class unicode are handled properly and encoded in UTF-8 when
a project file is output.
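For example (illustrative), both of the following produce identical UTF-8
output when the project file is written:

  PBXGroup({'name': u'Fr\u00fchst\u00fcck'})     # a unicode object
  PBXGroup({'name': 'Fr\xc3\xbchst\xc3\xbcck'})  # a str already encoded as UTF-8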
"""
import gyp.common
import posixpath
import re
import struct
import sys
# hashlib is supplied as of Python 2.5 as the replacement interface for sha
# and other secure hashes. In 2.6, sha is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import sha otherwise,
# preserving 2.4 compatibility.
try:
import hashlib
_new_sha1 = hashlib.sha1
except ImportError:
import sha
_new_sha1 = sha.new
# See XCObject._EncodeString. This pattern is used to determine when a string
# can be printed unquoted. Strings that match this pattern may be printed
# unquoted. Strings that do not match must be quoted and may be further
# transformed to be properly encoded. Note that this expression matches the
# characters listed with "+", for 1 or more occurrences: if a string is empty,
# it must not match this pattern, because it needs to be encoded as "".
_unquoted = re.compile('^[A-Za-z0-9$./_]+$')
# Strings that match this pattern are quoted regardless of what _unquoted says.
# Oddly, Xcode will quote any string with a run of three or more underscores.
_quoted = re.compile('___')
# This pattern should match any character that needs to be escaped by
# XCObject._EncodeString. See that function.
_escaped = re.compile('[\\\\"]|[\x00-\x1f]')
# Used by SourceTreeAndPathFromPath
_path_leading_variable = re.compile('^\$\((.*?)\)(/(.*))?$')
def SourceTreeAndPathFromPath(input_path):
"""Given input_path, returns a tuple with sourceTree and path values.
Examples:
input_path (source_tree, output_path)
'$(VAR)/path' ('VAR', 'path')
'$(VAR)' ('VAR', None)
'path' (None, 'path')
"""
source_group_match = _path_leading_variable.match(input_path)
if source_group_match:
source_tree = source_group_match.group(1)
output_path = source_group_match.group(3) # This may be None.
else:
source_tree = None
output_path = input_path
return (source_tree, output_path)
def ConvertVariablesToShellSyntax(input_string):
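  # Rewrites Xcode-style variable references into shell syntax; for example
  # (illustrative), '$(SRCROOT)/run.sh' becomes '${SRCROOT}/run.sh'.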
return re.sub('\$\((.*?)\)', '${\\1}', input_string)
class XCObject(object):
"""The abstract base of all class types used in Xcode project files.
Class variables:
_schema: A dictionary defining the properties of this class. The keys to
_schema are string property keys as used in project files. Values
are a list of four or five elements:
[ is_list, property_type, is_strong, is_required, default ]
is_list: True if the property described is a list, as opposed
to a single element.
property_type: The type to use as the value of the property,
or if is_list is True, the type to use for each
element of the value's list. property_type must
be an XCObject subclass, or one of the built-in
types str, int, or dict.
is_strong: If property_type is an XCObject subclass, is_strong
is True to assert that this class "owns," or serves
as parent, to the property value (or, if is_list is
True, values). is_strong must be False if
property_type is not an XCObject subclass.
is_required: True if the property is required for the class.
Note that is_required being True does not preclude
an empty string ("", in the case of property_type
str) or list ([], in the case of is_list True) from
being set for the property.
          default: Optional. If is_required is True, default may be set
to provide a default value for objects that do not supply
their own value. If is_required is True and default
is not provided, users of the class must supply their own
value for the property.
Note that although the values of the array are expressed in
boolean terms, subclasses provide values as integers to conserve
horizontal space.
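      As an illustrative example (mirroring the 'children' and 'name' entries
      that PBXGroup declares later in this module), a subclass might provide:

        _schema = XCObject._schema.copy()
        _schema.update({
          'children': [1, XCHierarchicalElement, 1, 1, []],
          'name':     [0, str,                   0, 0],
        })

      Here, "children" is a required strong list of XCHierarchicalElement
      objects defaulting to an empty list, and "name" is an optional single
      string property.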
_should_print_single_line: False in XCObject. Subclasses whose objects
should be written to the project file in the
alternate single-line format, such as
PBXFileReference and PBXBuildFile, should
set this to True.
_encode_transforms: Used by _EncodeString to encode unprintable characters.
The index into this list is the ordinal of the
character to transform; each value is a string
used to represent the character in the output. XCObject
provides an _encode_transforms list suitable for most
XCObject subclasses.
_alternate_encode_transforms: Provided for subclasses that wish to use
the alternate encoding rules. Xcode seems
to use these rules when printing objects in
single-line format. Subclasses that desire
this behavior should set _encode_transforms
to _alternate_encode_transforms.
_hashables: A list of XCObject subclasses that can be hashed by ComputeIDs
to construct this object's ID. Most classes that need custom
hashing behavior should do it by overriding Hashables,
but in some cases an object's parent may wish to push a
hashable value into its child, and it can do so by appending
to _hashables.
Attributes:
id: The object's identifier, a 24-character uppercase hexadecimal string.
Usually, objects being created should not set id until the entire
project file structure is built. At that point, UpdateIDs() should
be called on the root object to assign deterministic values for id to
each object in the tree.
parent: The object's parent. This is set by a parent XCObject when a child
object is added to it.
_properties: The object's property dictionary. An object's properties are
described by its class' _schema variable.
"""
_schema = {}
_should_print_single_line = False
# See _EncodeString.
_encode_transforms = []
i = 0
while i < ord(' '):
_encode_transforms.append('\\U%04x' % i)
i = i + 1
_encode_transforms[7] = '\\a'
_encode_transforms[8] = '\\b'
_encode_transforms[9] = '\\t'
_encode_transforms[10] = '\\n'
_encode_transforms[11] = '\\v'
_encode_transforms[12] = '\\f'
_encode_transforms[13] = '\\n'
_alternate_encode_transforms = list(_encode_transforms)
_alternate_encode_transforms[9] = chr(9)
_alternate_encode_transforms[10] = chr(10)
_alternate_encode_transforms[11] = chr(11)
def __init__(self, properties=None, id=None, parent=None):
self.id = id
self.parent = parent
self._properties = {}
self._hashables = []
self._SetDefaultsFromSchema()
self.UpdateProperties(properties)
def __repr__(self):
try:
name = self.Name()
except NotImplementedError:
return '<%s at 0x%x>' % (self.__class__.__name__, id(self))
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Copy(self):
"""Make a copy of this object.
The new object will have its own copy of lists and dicts. Any XCObject
objects owned by this object (marked "strong") will be copied in the
new object, even those found in lists. If this object has any weak
references to other XCObjects, the same references are added to the new
object without making a copy.
"""
that = self.__class__(id=self.id, parent=self.parent)
for key, value in self._properties.iteritems():
is_strong = self._schema[key][2]
if isinstance(value, XCObject):
if is_strong:
new_value = value.Copy()
new_value.parent = that
that._properties[key] = new_value
else:
that._properties[key] = value
elif isinstance(value, str) or isinstance(value, unicode) or \
isinstance(value, int):
that._properties[key] = value
elif isinstance(value, list):
if is_strong:
# If is_strong is True, each element is an XCObject, so it's safe to
# call Copy.
that._properties[key] = []
for item in value:
new_item = item.Copy()
new_item.parent = that
that._properties[key].append(new_item)
else:
that._properties[key] = value[:]
elif isinstance(value, dict):
# dicts are never strong.
if is_strong:
raise TypeError, 'Strong dict for key ' + key + ' in ' + \
self.__class__.__name__
else:
that._properties[key] = value.copy()
else:
raise TypeError, 'Unexpected type ' + value.__class__.__name__ + \
' for key ' + key + ' in ' + self.__class__.__name__
return that
def Name(self):
"""Return the name corresponding to an object.
Not all objects necessarily need to be nameable, and not all that do have
a "name" property. Override as needed.
"""
# If the schema indicates that "name" is required, try to access the
# property even if it doesn't exist. This will result in a KeyError
# being raised for the property that should be present, which seems more
# appropriate than NotImplementedError in this case.
if 'name' in self._properties or \
('name' in self._schema and self._schema['name'][3]):
return self._properties['name']
raise NotImplementedError, \
self.__class__.__name__ + ' must implement Name'
def Comment(self):
"""Return a comment string for the object.
Most objects just use their name as the comment, but PBXProject uses
different values.
The returned comment is not escaped and does not have any comment marker
strings applied to it.
"""
return self.Name()
def Hashables(self):
hashables = [self.__class__.__name__]
name = self.Name()
if name != None:
hashables.append(name)
hashables.extend(self._hashables)
return hashables
def HashablesForChild(self):
return None
def ComputeIDs(self, recursive=True, overwrite=True, seed_hash=None):
"""Set "id" properties deterministically.
An object's "id" property is set based on a hash of its class type and
name, as well as the class type and name of all ancestor objects. As
such, it is only advisable to call ComputeIDs once an entire project file
tree is built.
If recursive is True, recurse into all descendant objects and update their
hashes.
If overwrite is True, any existing value set in the "id" property will be
replaced.
"""
def _HashUpdate(hash, data):
"""Update hash with data's length and contents.
If the hash were updated only with the value of data, it would be
possible for clowns to induce collisions by manipulating the names of
their objects. By adding the length, it's exceedingly less likely that
ID collisions will be encountered, intentionally or not.
"""
hash.update(struct.pack('>i', len(data)))
hash.update(data)
if seed_hash is None:
seed_hash = _new_sha1()
hash = seed_hash.copy()
hashables = self.Hashables()
assert len(hashables) > 0
for hashable in hashables:
_HashUpdate(hash, hashable)
if recursive:
hashables_for_child = self.HashablesForChild()
if hashables_for_child is None:
child_hash = hash
else:
assert len(hashables_for_child) > 0
child_hash = seed_hash.copy()
for hashable in hashables_for_child:
_HashUpdate(child_hash, hashable)
for child in self.Children():
child.ComputeIDs(recursive, overwrite, child_hash)
if overwrite or self.id is None:
      # Xcode IDs are only 96 bits (24 hex characters), but a SHA-1 digest is
      # 160 bits. Instead of throwing out 64 bits of the digest, xor them
# into the portion that gets used.
assert hash.digest_size % 4 == 0
digest_int_count = hash.digest_size / 4
digest_ints = struct.unpack('>' + 'I' * digest_int_count, hash.digest())
id_ints = [0, 0, 0]
for index in xrange(0, digest_int_count):
id_ints[index % 3] ^= digest_ints[index]
self.id = '%08X%08X%08X' % tuple(id_ints)
def EnsureNoIDCollisions(self):
"""Verifies that no two objects have the same ID. Checks all descendants.
"""
ids = {}
descendants = self.Descendants()
for descendant in descendants:
if descendant.id in ids:
other = ids[descendant.id]
raise KeyError, \
'Duplicate ID %s, objects "%s" and "%s" in "%s"' % \
(descendant.id, str(descendant._properties),
str(other._properties), self._properties['rootObject'].Name())
ids[descendant.id] = descendant
def Children(self):
"""Returns a list of all of this object's owned (strong) children."""
children = []
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong) = attributes[0:3]
if is_strong and property in self._properties:
if not is_list:
children.append(self._properties[property])
else:
children.extend(self._properties[property])
return children
def Descendants(self):
"""Returns a list of all of this object's descendants, including this
object.
"""
children = self.Children()
descendants = [self]
for child in children:
descendants.extend(child.Descendants())
return descendants
def PBXProjectAncestor(self):
# The base case for recursion is defined at PBXProject.PBXProjectAncestor.
if self.parent:
return self.parent.PBXProjectAncestor()
return None
def _EncodeComment(self, comment):
"""Encodes a comment to be placed in the project file output, mimicing
Xcode behavior.
"""
# This mimics Xcode behavior by wrapping the comment in "/*" and "*/". If
# the string already contains a "*/", it is turned into "(*)/". This keeps
# the file writer from outputting something that would be treated as the
# end of a comment in the middle of something intended to be entirely a
# comment.
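    # Example (illustrative): _EncodeComment('has */ inside') returns
    # '/* has (*)/ inside */'.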
return '/* ' + comment.replace('*/', '(*)/') + ' */'
def _EncodeTransform(self, match):
# This function works closely with _EncodeString. It will only be called
# by re.sub with match.group(0) containing a character matched by the
# the _escaped expression.
char = match.group(0)
# Backslashes (\) and quotation marks (") are always replaced with a
# backslash-escaped version of the same. Everything else gets its
# replacement from the class' _encode_transforms array.
if char == '\\':
return '\\\\'
if char == '"':
return '\\"'
return self._encode_transforms[ord(char)]
def _EncodeString(self, value):
"""Encodes a string to be placed in the project file output, mimicing
Xcode behavior.
"""
# Use quotation marks when any character outside of the range A-Z, a-z, 0-9,
    # $ (dollar sign), . (period), / (slash), and _ (underscore) is present.  Also use
# quotation marks to represent empty strings.
#
# Escape " (double-quote) and \ (backslash) by preceding them with a
# backslash.
#
# Some characters below the printable ASCII range are encoded specially:
# 7 ^G BEL is encoded as "\a"
# 8 ^H BS is encoded as "\b"
# 11 ^K VT is encoded as "\v"
# 12 ^L NP is encoded as "\f"
# 127 ^? DEL is passed through as-is without escaping
# - In PBXFileReference and PBXBuildFile objects:
# 9 ^I HT is passed through as-is without escaping
# 10 ^J NL is passed through as-is without escaping
# 13 ^M CR is passed through as-is without escaping
# - In other objects:
# 9 ^I HT is encoded as "\t"
# 10 ^J NL is encoded as "\n"
# 13 ^M CR is encoded as "\n" rendering it indistinguishable from
# 10 ^J NL
# All other characters within the ASCII control character range (0 through
# 31 inclusive) are encoded as "\U001f" referring to the Unicode code point
# in hexadecimal. For example, character 14 (^N SO) is encoded as "\U000e".
# Characters above the ASCII range are passed through to the output encoded
# as UTF-8 without any escaping. These mappings are contained in the
# class' _encode_transforms list.
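    # A few illustrative examples of the rules above:
    #   'abc.123'    -> abc.123       (only "safe" characters, printed unquoted)
    #   'has space'  -> "has space"
    #   'say "hi"'   -> "say \"hi\""
    #   ''           -> ""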
if _unquoted.search(value) and not _quoted.search(value):
return value
return '"' + _escaped.sub(self._EncodeTransform, value) + '"'
def _XCPrint(self, file, tabs, line):
file.write('\t' * tabs + line)
def _XCPrintableValue(self, tabs, value, flatten_list=False):
"""Returns a representation of value that may be printed in a project file,
    mimicking Xcode's behavior.
_XCPrintableValue can handle str and int values, XCObjects (which are
made printable by returning their id property), and list and dict objects
composed of any of the above types. When printing a list or dict, and
_should_print_single_line is False, the tabs parameter is used to determine
how much to indent the lines corresponding to the items in the list or
dict.
If flatten_list is True, single-element lists will be transformed into
strings.
"""
printable = ''
comment = None
if self._should_print_single_line:
sep = ' '
element_tabs = ''
end_tabs = ''
else:
sep = '\n'
element_tabs = '\t' * (tabs + 1)
end_tabs = '\t' * tabs
if isinstance(value, XCObject):
printable += value.id
comment = value.Comment()
elif isinstance(value, str):
printable += self._EncodeString(value)
elif isinstance(value, unicode):
printable += self._EncodeString(value.encode('utf-8'))
elif isinstance(value, int):
printable += str(value)
elif isinstance(value, list):
if flatten_list and len(value) <= 1:
if len(value) == 0:
printable += self._EncodeString('')
else:
printable += self._EncodeString(value[0])
else:
printable = '(' + sep
for item in value:
printable += element_tabs + \
self._XCPrintableValue(tabs + 1, item, flatten_list) + \
',' + sep
printable += end_tabs + ')'
elif isinstance(value, dict):
printable = '{' + sep
for item_key, item_value in sorted(value.iteritems()):
printable += element_tabs + \
self._XCPrintableValue(tabs + 1, item_key, flatten_list) + ' = ' + \
self._XCPrintableValue(tabs + 1, item_value, flatten_list) + ';' + \
sep
printable += end_tabs + '}'
else:
raise TypeError, "Can't make " + value.__class__.__name__ + ' printable'
if comment != None:
printable += ' ' + self._EncodeComment(comment)
return printable
def _XCKVPrint(self, file, tabs, key, value):
"""Prints a key and value, members of an XCObject's _properties dictionary,
to file.
tabs is an int identifying the indentation level. If the class'
_should_print_single_line variable is True, tabs is ignored and the
    key-value pair will be followed by a space instead of a newline.
"""
if self._should_print_single_line:
printable = ''
after_kv = ' '
else:
printable = '\t' * tabs
after_kv = '\n'
# Xcode usually prints remoteGlobalIDString values in PBXContainerItemProxy
# objects without comments. Sometimes it prints them with comments, but
# the majority of the time, it doesn't. To avoid unnecessary changes to
# the project file after Xcode opens it, don't write comments for
# remoteGlobalIDString. This is a sucky hack and it would certainly be
# cleaner to extend the schema to indicate whether or not a comment should
# be printed, but since this is the only case where the problem occurs and
# Xcode itself can't seem to make up its mind, the hack will suffice.
#
# Also see PBXContainerItemProxy._schema['remoteGlobalIDString'].
if key == 'remoteGlobalIDString' and isinstance(self,
PBXContainerItemProxy):
value_to_print = value.id
else:
value_to_print = value
# PBXBuildFile's settings property is represented in the output as a dict,
# but a hack here has it represented as a string. Arrange to strip off the
# quotes so that it shows up in the output as expected.
if key == 'settings' and isinstance(self, PBXBuildFile):
strip_value_quotes = True
else:
strip_value_quotes = False
# In another one-off, let's set flatten_list on buildSettings properties
# of XCBuildConfiguration objects, because that's how Xcode treats them.
if key == 'buildSettings' and isinstance(self, XCBuildConfiguration):
flatten_list = True
else:
flatten_list = False
try:
printable_key = self._XCPrintableValue(tabs, key, flatten_list)
printable_value = self._XCPrintableValue(tabs, value_to_print,
flatten_list)
if strip_value_quotes and len(printable_value) > 1 and \
printable_value[0] == '"' and printable_value[-1] == '"':
printable_value = printable_value[1:-1]
printable += printable_key + ' = ' + printable_value + ';' + after_kv
except TypeError, e:
gyp.common.ExceptionAppend(e,
'while printing key "%s"' % key)
raise
self._XCPrint(file, 0, printable)
def Print(self, file=sys.stdout):
"""Prints a reprentation of this object to file, adhering to Xcode output
formatting.
"""
self.VerifyHasRequiredProperties()
if self._should_print_single_line:
# When printing an object in a single line, Xcode doesn't put any space
# between the beginning of a dictionary (or presumably a list) and the
# first contained item, so you wind up with snippets like
# ...CDEF = {isa = PBXFileReference; fileRef = 0123...
# If it were me, I would have put a space in there after the opening
# curly, but I guess this is just another one of those inconsistencies
# between how Xcode prints PBXFileReference and PBXBuildFile objects as
# compared to other objects. Mimic Xcode's behavior here by using an
# empty string for sep.
sep = ''
end_tabs = 0
else:
sep = '\n'
end_tabs = 2
# Start the object. For example, '\t\tPBXProject = {\n'.
self._XCPrint(file, 2, self._XCPrintableValue(2, self) + ' = {' + sep)
# "isa" isn't in the _properties dictionary, it's an intrinsic property
# of the class which the object belongs to. Xcode always outputs "isa"
# as the first element of an object dictionary.
self._XCKVPrint(file, 3, 'isa', self.__class__.__name__)
# The remaining elements of an object dictionary are sorted alphabetically.
for property, value in sorted(self._properties.iteritems()):
self._XCKVPrint(file, 3, property, value)
# End the object.
self._XCPrint(file, end_tabs, '};\n')
def UpdateProperties(self, properties, do_copy=False):
"""Merge the supplied properties into the _properties dictionary.
The input properties must adhere to the class schema or a KeyError or
TypeError exception will be raised. If adding an object of an XCObject
subclass and the schema indicates a strong relationship, the object's
parent will be set to this object.
If do_copy is True, then lists, dicts, strong-owned XCObjects, and
strong-owned XCObjects in lists will be copied instead of having their
references added.
"""
if properties is None:
return
for property, value in properties.iteritems():
# Make sure the property is in the schema.
if not property in self._schema:
raise KeyError, property + ' not in ' + self.__class__.__name__
# Make sure the property conforms to the schema.
(is_list, property_type, is_strong) = self._schema[property][0:3]
if is_list:
if value.__class__ != list:
raise TypeError, \
property + ' of ' + self.__class__.__name__ + \
' must be list, not ' + value.__class__.__name__
for item in value:
if not isinstance(item, property_type) and \
not (item.__class__ == unicode and property_type == str):
# Accept unicode where str is specified. str is treated as
# UTF-8-encoded.
raise TypeError, \
'item of ' + property + ' of ' + self.__class__.__name__ + \
' must be ' + property_type.__name__ + ', not ' + \
item.__class__.__name__
elif not isinstance(value, property_type) and \
not (value.__class__ == unicode and property_type == str):
# Accept unicode where str is specified. str is treated as
# UTF-8-encoded.
raise TypeError, \
property + ' of ' + self.__class__.__name__ + ' must be ' + \
property_type.__name__ + ', not ' + value.__class__.__name__
# Checks passed, perform the assignment.
if do_copy:
if isinstance(value, XCObject):
if is_strong:
self._properties[property] = value.Copy()
else:
self._properties[property] = value
elif isinstance(value, str) or isinstance(value, unicode) or \
isinstance(value, int):
self._properties[property] = value
elif isinstance(value, list):
if is_strong:
# If is_strong is True, each element is an XCObject, so it's safe
# to call Copy.
self._properties[property] = []
for item in value:
self._properties[property].append(item.Copy())
else:
self._properties[property] = value[:]
elif isinstance(value, dict):
self._properties[property] = value.copy()
else:
raise TypeError, "Don't know how to copy a " + \
value.__class__.__name__ + ' object for ' + \
property + ' in ' + self.__class__.__name__
else:
self._properties[property] = value
# Set up the child's back-reference to this object. Don't use |value|
# any more because it may not be right if do_copy is true.
if is_strong:
if not is_list:
self._properties[property].parent = self
else:
for item in self._properties[property]:
item.parent = self
def HasProperty(self, key):
return key in self._properties
def GetProperty(self, key):
return self._properties[key]
def SetProperty(self, key, value):
self.UpdateProperties({key: value})
def DelProperty(self, key):
if key in self._properties:
del self._properties[key]
def AppendProperty(self, key, value):
# TODO(mark): Support ExtendProperty too (and make this call that)?
# Schema validation.
if not key in self._schema:
raise KeyError, key + ' not in ' + self.__class__.__name__
(is_list, property_type, is_strong) = self._schema[key][0:3]
if not is_list:
raise TypeError, key + ' of ' + self.__class__.__name__ + ' must be list'
if not isinstance(value, property_type):
raise TypeError, 'item of ' + key + ' of ' + self.__class__.__name__ + \
' must be ' + property_type.__name__ + ', not ' + \
value.__class__.__name__
# If the property doesn't exist yet, create a new empty list to receive the
# item.
if not key in self._properties:
self._properties[key] = []
# Set up the ownership link.
if is_strong:
value.parent = self
# Store the item.
self._properties[key].append(value)
def VerifyHasRequiredProperties(self):
"""Ensure that all properties identified as required by the schema are
set.
"""
# TODO(mark): A stronger verification mechanism is needed. Some
# subclasses need to perform validation beyond what the schema can enforce.
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong, is_required) = attributes[0:4]
if is_required and not property in self._properties:
raise KeyError, self.__class__.__name__ + ' requires ' + property
def _SetDefaultsFromSchema(self):
"""Assign object default values according to the schema. This will not
overwrite properties that have already been set."""
defaults = {}
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong, is_required) = attributes[0:4]
if is_required and len(attributes) >= 5 and \
not property in self._properties:
default = attributes[4]
defaults[property] = default
if len(defaults) > 0:
# Use do_copy=True so that each new object gets its own copy of strong
# objects, lists, and dicts.
self.UpdateProperties(defaults, do_copy=True)
class XCHierarchicalElement(XCObject):
"""Abstract base for PBXGroup and PBXFileReference. Not represented in a
project file."""
# TODO(mark): Do name and path belong here? Probably so.
# If path is set and name is not, name may have a default value. Name will
# be set to the basename of path, if the basename of path is different from
# the full value of path. If path is already just a leaf name, name will
# not be set.
_schema = XCObject._schema.copy()
_schema.update({
'comments': [0, str, 0, 0],
'fileEncoding': [0, str, 0, 0],
'includeInIndex': [0, int, 0, 0],
'indentWidth': [0, int, 0, 0],
'lineEnding': [0, int, 0, 0],
'sourceTree': [0, str, 0, 1, '<group>'],
'tabWidth': [0, int, 0, 0],
'usesTabs': [0, int, 0, 0],
'wrapsLines': [0, int, 0, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCObject.__init__(self, properties, id, parent)
if 'path' in self._properties and not 'name' in self._properties:
path = self._properties['path']
name = posixpath.basename(path)
if name != '' and path != name:
self.SetProperty('name', name)
if 'path' in self._properties and \
(not 'sourceTree' in self._properties or \
self._properties['sourceTree'] == '<group>'):
# If the pathname begins with an Xcode variable like "$(SDKROOT)/", take
# the variable out and make the path be relative to that variable by
# assigning the variable name as the sourceTree.
(source_tree, path) = SourceTreeAndPathFromPath(self._properties['path'])
if source_tree != None:
self._properties['sourceTree'] = source_tree
if path != None:
self._properties['path'] = path
if source_tree != None and path is None and \
not 'name' in self._properties:
# The path was of the form "$(SDKROOT)" with no path following it.
# This object is now relative to that variable, so it has no path
# attribute of its own. It does, however, keep a name.
del self._properties['path']
self._properties['name'] = source_tree
def Name(self):
if 'name' in self._properties:
return self._properties['name']
elif 'path' in self._properties:
return self._properties['path']
else:
# This happens in the case of the root PBXGroup.
return None
def Hashables(self):
"""Custom hashables for XCHierarchicalElements.
XCHierarchicalElements are special. Generally, their hashes shouldn't
change if the paths don't change. The normal XCObject implementation of
Hashables adds a hashable for each object, which means that if
the hierarchical structure changes (possibly due to changes caused when
TakeOverOnlyChild runs and encounters slight changes in the hierarchy),
the hashes will change. For example, if a project file initially contains
    a/b/f1 and the groups a and b are collapsed into a single group a/b, f1 will have a single parent
a/b. If someone later adds a/f2 to the project file, a/b can no longer be
collapsed, and f1 winds up with parent b and grandparent a. That would
be sufficient to change f1's hash.
To counteract this problem, hashables for all XCHierarchicalElements except
for the main group (which has neither a name nor a path) are taken to be
just the set of path components. Because hashables are inherited from
parents, this provides assurance that a/b/f1 has the same set of hashables
whether its parent is b or a/b.
The main group is a special case. As it is permitted to have no name or
path, it is permitted to use the standard XCObject hash mechanism. This
is not considered a problem because there can be only one main group.
"""
if self == self.PBXProjectAncestor()._properties['mainGroup']:
# super
return XCObject.Hashables(self)
hashables = []
# Put the name in first, ensuring that if TakeOverOnlyChild collapses
# children into a top-level group like "Source", the name always goes
# into the list of hashables without interfering with path components.
if 'name' in self._properties:
# Make it less likely for people to manipulate hashes by following the
# pattern of always pushing an object type value onto the list first.
hashables.append(self.__class__.__name__ + '.name')
hashables.append(self._properties['name'])
# NOTE: This still has the problem that if an absolute path is encountered,
# including paths with a sourceTree, they'll still inherit their parents'
# hashables, even though the paths aren't relative to their parents. This
# is not expected to be much of a problem in practice.
path = self.PathFromSourceTreeAndPath()
if path != None:
components = path.split(posixpath.sep)
for component in components:
hashables.append(self.__class__.__name__ + '.path')
hashables.append(component)
hashables.extend(self._hashables)
return hashables
def Compare(self, other):
# Allow comparison of these types. PBXGroup has the highest sort rank;
# PBXVariantGroup is treated as equal to PBXFileReference.
valid_class_types = {
PBXFileReference: 'file',
PBXGroup: 'group',
PBXVariantGroup: 'file',
}
self_type = valid_class_types[self.__class__]
other_type = valid_class_types[other.__class__]
if self_type == other_type:
# If the two objects are of the same sort rank, compare their names.
return cmp(self.Name(), other.Name())
# Otherwise, sort groups before everything else.
if self_type == 'group':
return -1
return 1
def CompareRootGroup(self, other):
# This function should be used only to compare direct children of the
# containing PBXProject's mainGroup. These groups should appear in the
# listed order.
# TODO(mark): "Build" is used by gyp.generator.xcode, perhaps the
# generator should have a way of influencing this list rather than having
# to hardcode for the generator here.
order = ['Source', 'Intermediates', 'Projects', 'Frameworks', 'Products',
'Build']
# If the groups aren't in the listed order, do a name comparison.
# Otherwise, groups in the listed order should come before those that
# aren't.
self_name = self.Name()
other_name = other.Name()
self_in = isinstance(self, PBXGroup) and self_name in order
    other_in = isinstance(other, PBXGroup) and other_name in order
if not self_in and not other_in:
return self.Compare(other)
if self_name in order and not other_name in order:
return -1
if other_name in order and not self_name in order:
return 1
# If both groups are in the listed order, go by the defined order.
self_index = order.index(self_name)
other_index = order.index(other_name)
if self_index < other_index:
return -1
if self_index > other_index:
return 1
return 0
def PathFromSourceTreeAndPath(self):
# Turn the object's sourceTree and path properties into a single flat
# string of a form comparable to the path parameter. If there's a
# sourceTree property other than "<group>", wrap it in $(...) for the
# comparison.
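    # Illustrative examples:
    #   sourceTree 'SDKROOT', path 'usr/lib' -> '$(SDKROOT)/usr/lib'
    #   sourceTree '<group>', path 'foo'     -> 'foo'
    #   sourceTree '<group>', no path        -> None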
components = []
if self._properties['sourceTree'] != '<group>':
components.append('$(' + self._properties['sourceTree'] + ')')
if 'path' in self._properties:
components.append(self._properties['path'])
if len(components) > 0:
return posixpath.join(*components)
return None
def FullPath(self):
# Returns a full path to self relative to the project file, or relative
# to some other source tree. Start with self, and walk up the chain of
# parents prepending their paths, if any, until no more parents are
# available (project-relative path) or until a path relative to some
# source tree is found.
xche = self
path = None
while isinstance(xche, XCHierarchicalElement) and \
(path is None or \
(not path.startswith('/') and not path.startswith('$'))):
this_path = xche.PathFromSourceTreeAndPath()
if this_path != None and path != None:
path = posixpath.join(this_path, path)
elif this_path != None:
path = this_path
xche = xche.parent
return path
class PBXGroup(XCHierarchicalElement):
"""
Attributes:
_children_by_path: Maps pathnames of children of this PBXGroup to the
actual child XCHierarchicalElement objects.
_variant_children_by_name_and_path: Maps (name, path) tuples of
PBXVariantGroup children to the actual child PBXVariantGroup objects.
"""
_schema = XCHierarchicalElement._schema.copy()
_schema.update({
'children': [1, XCHierarchicalElement, 1, 1, []],
'name': [0, str, 0, 0],
'path': [0, str, 0, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCHierarchicalElement.__init__(self, properties, id, parent)
self._children_by_path = {}
self._variant_children_by_name_and_path = {}
for child in self._properties.get('children', []):
self._AddChildToDicts(child)
def Hashables(self):
# super
hashables = XCHierarchicalElement.Hashables(self)
# It is not sufficient to just rely on name and parent to build a unique
    # hashable: a node could have two child PBXGroups sharing a common name.
    # To add entropy, the hashable is enhanced with the names of all of its
# children.
for child in self._properties.get('children', []):
child_name = child.Name()
if child_name != None:
hashables.append(child_name)
return hashables
def HashablesForChild(self):
# To avoid a circular reference the hashables used to compute a child id do
# not include the child names.
return XCHierarchicalElement.Hashables(self)
def _AddChildToDicts(self, child):
# Sets up this PBXGroup object's dicts to reference the child properly.
child_path = child.PathFromSourceTreeAndPath()
if child_path:
if child_path in self._children_by_path:
raise ValueError, 'Found multiple children with path ' + child_path
self._children_by_path[child_path] = child
if isinstance(child, PBXVariantGroup):
child_name = child._properties.get('name', None)
key = (child_name, child_path)
if key in self._variant_children_by_name_and_path:
raise ValueError, 'Found multiple PBXVariantGroup children with ' + \
'name ' + str(child_name) + ' and path ' + \
str(child_path)
self._variant_children_by_name_and_path[key] = child
def AppendChild(self, child):
# Callers should use this instead of calling
# AppendProperty('children', child) directly because this function
# maintains the group's dicts.
self.AppendProperty('children', child)
self._AddChildToDicts(child)
def GetChildByName(self, name):
# This is not currently optimized with a dict as GetChildByPath is because
# it has few callers. Most callers probably want GetChildByPath. This
# function is only useful to get children that have names but no paths,
# which is rare. The children of the main group ("Source", "Products",
    # etc.) is pretty much the only case where this is likely to come up.
#
# TODO(mark): Maybe this should raise an error if more than one child is
# present with the same name.
if not 'children' in self._properties:
return None
for child in self._properties['children']:
if child.Name() == name:
return child
return None
def GetChildByPath(self, path):
if not path:
return None
if path in self._children_by_path:
return self._children_by_path[path]
return None
def GetChildByRemoteObject(self, remote_object):
# This method is a little bit esoteric. Given a remote_object, which
# should be a PBXFileReference in another project file, this method will
# return this group's PBXReferenceProxy object serving as a local proxy
# for the remote PBXFileReference.
#
# This function might benefit from a dict optimization as GetChildByPath
# for some workloads, but profiling shows that it's not currently a
# problem.
if not 'children' in self._properties:
return None
for child in self._properties['children']:
if not isinstance(child, PBXReferenceProxy):
continue
container_proxy = child._properties['remoteRef']
if container_proxy._properties['remoteGlobalIDString'] == remote_object:
return child
return None
def AddOrGetFileByPath(self, path, hierarchical):
"""Returns an existing or new file reference corresponding to path.
If hierarchical is True, this method will create or use the necessary
hierarchical group structure corresponding to path. Otherwise, it will
look in and create an item in the current group only.
If an existing matching reference is found, it is returned, otherwise, a
new one will be created, added to the correct group, and returned.
If path identifies a directory by virtue of carrying a trailing slash,
this method returns a PBXFileReference of "folder" type. If path
identifies a variant, by virtue of it identifying a file inside a directory
with an ".lproj" extension, this method returns a PBXVariantGroup
containing the variant named by path, and possibly other variants. For
all other paths, a "normal" PBXFileReference will be returned.
"""
# Adding or getting a directory? Directories end with a trailing slash.
is_dir = False
if path.endswith('/'):
is_dir = True
path = posixpath.normpath(path)
if is_dir:
path = path + '/'
# Adding or getting a variant? Variants are files inside directories
# with an ".lproj" extension. Xcode uses variants for localization. For
# a variant path/to/Language.lproj/MainMenu.nib, put a variant group named
# MainMenu.nib inside path/to, and give it a variant named Language. In
# this example, grandparent would be set to path/to and parent_root would
# be set to Language.
variant_name = None
parent = posixpath.dirname(path)
grandparent = posixpath.dirname(parent)
parent_basename = posixpath.basename(parent)
(parent_root, parent_ext) = posixpath.splitext(parent_basename)
if parent_ext == '.lproj':
variant_name = parent_root
if grandparent == '':
grandparent = None
# Putting a directory inside a variant group is not currently supported.
assert not is_dir or variant_name is None
path_split = path.split(posixpath.sep)
if len(path_split) == 1 or \
((is_dir or variant_name != None) and len(path_split) == 2) or \
not hierarchical:
# The PBXFileReference or PBXVariantGroup will be added to or gotten from
# this PBXGroup, no recursion necessary.
if variant_name is None:
# Add or get a PBXFileReference.
file_ref = self.GetChildByPath(path)
if file_ref != None:
assert file_ref.__class__ == PBXFileReference
else:
file_ref = PBXFileReference({'path': path})
self.AppendChild(file_ref)
else:
# Add or get a PBXVariantGroup. The variant group name is the same
# as the basename (MainMenu.nib in the example above). grandparent
# specifies the path to the variant group itself, and path_split[-2:]
# is the path of the specific variant relative to its group.
variant_group_name = posixpath.basename(path)
variant_group_ref = self.AddOrGetVariantGroupByNameAndPath(
variant_group_name, grandparent)
variant_path = posixpath.sep.join(path_split[-2:])
variant_ref = variant_group_ref.GetChildByPath(variant_path)
if variant_ref != None:
assert variant_ref.__class__ == PBXFileReference
else:
variant_ref = PBXFileReference({'name': variant_name,
'path': variant_path})
variant_group_ref.AppendChild(variant_ref)
# The caller is interested in the variant group, not the specific
# variant file.
file_ref = variant_group_ref
return file_ref
else:
# Hierarchical recursion. Add or get a PBXGroup corresponding to the
# outermost path component, and then recurse into it, chopping off that
# path component.
next_dir = path_split[0]
group_ref = self.GetChildByPath(next_dir)
if group_ref != None:
assert group_ref.__class__ == PBXGroup
else:
group_ref = PBXGroup({'path': next_dir})
self.AppendChild(group_ref)
return group_ref.AddOrGetFileByPath(posixpath.sep.join(path_split[1:]),
hierarchical)
def AddOrGetVariantGroupByNameAndPath(self, name, path):
"""Returns an existing or new PBXVariantGroup for name and path.
If a PBXVariantGroup identified by the name and path arguments is already
present as a child of this object, it is returned. Otherwise, a new
PBXVariantGroup with the correct properties is created, added as a child,
and returned.
This method will generally be called by AddOrGetFileByPath, which knows
when to create a variant group based on the structure of the pathnames
passed to it.
"""
key = (name, path)
if key in self._variant_children_by_name_and_path:
variant_group_ref = self._variant_children_by_name_and_path[key]
assert variant_group_ref.__class__ == PBXVariantGroup
return variant_group_ref
variant_group_properties = {'name': name}
if path != None:
variant_group_properties['path'] = path
variant_group_ref = PBXVariantGroup(variant_group_properties)
self.AppendChild(variant_group_ref)
return variant_group_ref
def TakeOverOnlyChild(self, recurse=False):
"""If this PBXGroup has only one child and it's also a PBXGroup, take
it over by making all of its children this object's children.
This function will continue to take over only children when those children
are groups. If there are three PBXGroups representing a, b, and c, with
c inside b and b inside a, and a and b have no other children, this will
result in a taking over both b and c, forming a PBXGroup for a/b/c.
If recurse is True, this function will recurse into children and ask them
to collapse themselves by taking over only children as well. Assuming
an example hierarchy with files at a/b/c/d1, a/b/c/d2, and a/b/c/d3/e/f
(d1, d2, and f are files, the rest are groups), recursion will result in
a group for a/b/c containing a group for d3/e.
"""
# At this stage, check that child class types are PBXGroup exactly,
# instead of using isinstance. The only subclass of PBXGroup,
# PBXVariantGroup, should not participate in reparenting in the same way:
# reparenting by merging different object types would be wrong.
while len(self._properties['children']) == 1 and \
self._properties['children'][0].__class__ == PBXGroup:
# Loop to take over the innermost only-child group possible.
child = self._properties['children'][0]
# Assume the child's properties, including its children. Save a copy
# of this object's old properties, because they'll still be needed.
# This object retains its existing id and parent attributes.
old_properties = self._properties
self._properties = child._properties
self._children_by_path = child._children_by_path
if not 'sourceTree' in self._properties or \
self._properties['sourceTree'] == '<group>':
# The child was relative to its parent. Fix up the path. Note that
# children with a sourceTree other than "<group>" are not relative to
# their parents, so no path fix-up is needed in that case.
if 'path' in old_properties:
if 'path' in self._properties:
# Both the original parent and child have paths set.
self._properties['path'] = posixpath.join(old_properties['path'],
self._properties['path'])
else:
# Only the original parent has a path, use it.
self._properties['path'] = old_properties['path']
if 'sourceTree' in old_properties:
# The original parent had a sourceTree set, use it.
self._properties['sourceTree'] = old_properties['sourceTree']
# If the original parent had a name set, keep using it. If the original
# parent didn't have a name but the child did, let the child's name
# live on. If the name attribute seems unnecessary now, get rid of it.
if 'name' in old_properties and old_properties['name'] != None and \
old_properties['name'] != self.Name():
self._properties['name'] = old_properties['name']
if 'name' in self._properties and 'path' in self._properties and \
self._properties['name'] == self._properties['path']:
del self._properties['name']
# Notify all children of their new parent.
for child in self._properties['children']:
child.parent = self
# If asked to recurse, recurse.
if recurse:
for child in self._properties['children']:
if child.__class__ == PBXGroup:
child.TakeOverOnlyChild(recurse)
def SortGroup(self):
self._properties['children'] = \
sorted(self._properties['children'], cmp=lambda x,y: x.Compare(y))
# Recurse.
for child in self._properties['children']:
if isinstance(child, PBXGroup):
child.SortGroup()
class XCFileLikeElement(XCHierarchicalElement):
# Abstract base for objects that can be used as the fileRef property of
# PBXBuildFile.
def PathHashables(self):
# A PBXBuildFile that refers to this object will call this method to
# obtain additional hashables specific to this XCFileLikeElement. Don't
# just use this object's hashables, they're not specific and unique enough
# on their own (without access to the parent hashables.) Instead, provide
# hashables that identify this object by path by getting its hashables as
# well as the hashables of ancestor XCHierarchicalElement objects.
hashables = []
xche = self
while xche != None and isinstance(xche, XCHierarchicalElement):
xche_hashables = xche.Hashables()
for index in xrange(0, len(xche_hashables)):
hashables.insert(index, xche_hashables[index])
xche = xche.parent
return hashables
class XCContainerPortal(XCObject):
# Abstract base for objects that can be used as the containerPortal property
# of PBXContainerItemProxy.
pass
class XCRemoteObject(XCObject):
# Abstract base for objects that can be used as the remoteGlobalIDString
# property of PBXContainerItemProxy.
pass
class PBXFileReference(XCFileLikeElement, XCContainerPortal, XCRemoteObject):
_schema = XCFileLikeElement._schema.copy()
_schema.update({
'explicitFileType': [0, str, 0, 0],
'lastKnownFileType': [0, str, 0, 0],
'name': [0, str, 0, 0],
'path': [0, str, 0, 1],
})
# Weird output rules for PBXFileReference.
_should_print_single_line = True
# super
_encode_transforms = XCFileLikeElement._alternate_encode_transforms
def __init__(self, properties=None, id=None, parent=None):
# super
XCFileLikeElement.__init__(self, properties, id, parent)
if 'path' in self._properties and self._properties['path'].endswith('/'):
self._properties['path'] = self._properties['path'][:-1]
is_dir = True
else:
is_dir = False
if 'path' in self._properties and \
not 'lastKnownFileType' in self._properties and \
not 'explicitFileType' in self._properties:
# TODO(mark): This is the replacement for a replacement for a quick hack.
# It is no longer incredibly sucky, but this list needs to be extended.
extension_map = {
'a': 'archive.ar',
'app': 'wrapper.application',
'bdic': 'file',
'bundle': 'wrapper.cfbundle',
'c': 'sourcecode.c.c',
'cc': 'sourcecode.cpp.cpp',
'cpp': 'sourcecode.cpp.cpp',
'css': 'text.css',
'cxx': 'sourcecode.cpp.cpp',
'dart': 'sourcecode',
'dylib': 'compiled.mach-o.dylib',
'framework': 'wrapper.framework',
'gyp': 'sourcecode',
'gypi': 'sourcecode',
'h': 'sourcecode.c.h',
'hxx': 'sourcecode.cpp.h',
'icns': 'image.icns',
'java': 'sourcecode.java',
'js': 'sourcecode.javascript',
'm': 'sourcecode.c.objc',
'mm': 'sourcecode.cpp.objcpp',
'nib': 'wrapper.nib',
'o': 'compiled.mach-o.objfile',
'pdf': 'image.pdf',
'pl': 'text.script.perl',
'plist': 'text.plist.xml',
'pm': 'text.script.perl',
'png': 'image.png',
'py': 'text.script.python',
'r': 'sourcecode.rez',
'rez': 'sourcecode.rez',
's': 'sourcecode.asm',
'storyboard': 'file.storyboard',
'strings': 'text.plist.strings',
'ttf': 'file',
'xcassets': 'folder.assetcatalog',
'xcconfig': 'text.xcconfig',
'xcdatamodel': 'wrapper.xcdatamodel',
'xib': 'file.xib',
'y': 'sourcecode.yacc',
}
prop_map = {
'dart': 'explicitFileType',
'gyp': 'explicitFileType',
'gypi': 'explicitFileType',
}
if is_dir:
file_type = 'folder'
prop_name = 'lastKnownFileType'
else:
basename = posixpath.basename(self._properties['path'])
(root, ext) = posixpath.splitext(basename)
# Check the map using a lowercase extension.
# TODO(mark): Maybe it should try with the original case first and fall
# back to lowercase, in case there are any instances where case
# matters. There currently aren't.
if ext != '':
ext = ext[1:].lower()
# TODO(mark): "text" is the default value, but "file" is appropriate
# for unrecognized files not containing text. Xcode seems to choose
# based on content.
file_type = extension_map.get(ext, 'text')
prop_name = prop_map.get(ext, 'lastKnownFileType')
self._properties[prop_name] = file_type
class PBXVariantGroup(PBXGroup, XCFileLikeElement):
"""PBXVariantGroup is used by Xcode to represent localizations."""
# No additions to the schema relative to PBXGroup.
pass
# PBXReferenceProxy is also an XCFileLikeElement subclass. It is defined below
# because it uses PBXContainerItemProxy, defined below.
class XCBuildConfiguration(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'baseConfigurationReference': [0, PBXFileReference, 0, 0],
'buildSettings': [0, dict, 0, 1, {}],
'name': [0, str, 0, 1],
})
def HasBuildSetting(self, key):
return key in self._properties['buildSettings']
def GetBuildSetting(self, key):
return self._properties['buildSettings'][key]
def SetBuildSetting(self, key, value):
# TODO(mark): If a list, copy?
self._properties['buildSettings'][key] = value
def AppendBuildSetting(self, key, value):
if not key in self._properties['buildSettings']:
self._properties['buildSettings'][key] = []
self._properties['buildSettings'][key].append(value)
def DelBuildSetting(self, key):
if key in self._properties['buildSettings']:
del self._properties['buildSettings'][key]
def SetBaseConfiguration(self, value):
self._properties['baseConfigurationReference'] = value
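# Illustrative usage of XCBuildConfiguration (the setting names below are
# ordinary Xcode build settings, not anything defined by this module):
#   debug = XCBuildConfiguration({'name': 'Debug'})
#   debug.SetBuildSetting('GCC_OPTIMIZATION_LEVEL', '0')
#   debug.AppendBuildSetting('OTHER_CFLAGS', '-Wall')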
class XCConfigurationList(XCObject):
# _configs is the default list of configurations.
_configs = [ XCBuildConfiguration({'name': 'Debug'}),
XCBuildConfiguration({'name': 'Release'}) ]
_schema = XCObject._schema.copy()
_schema.update({
'buildConfigurations': [1, XCBuildConfiguration, 1, 1, _configs],
'defaultConfigurationIsVisible': [0, int, 0, 1, 1],
'defaultConfigurationName': [0, str, 0, 1, 'Release'],
})
def Name(self):
return 'Build configuration list for ' + \
self.parent.__class__.__name__ + ' "' + self.parent.Name() + '"'
def ConfigurationNamed(self, name):
"""Convenience accessor to obtain an XCBuildConfiguration by name."""
for configuration in self._properties['buildConfigurations']:
if configuration._properties['name'] == name:
return configuration
raise KeyError, name
def DefaultConfiguration(self):
"""Convenience accessor to obtain the default XCBuildConfiguration."""
return self.ConfigurationNamed(self._properties['defaultConfigurationName'])
def HasBuildSetting(self, key):
"""Determines the state of a build setting in all XCBuildConfiguration
child objects.
If all child objects have key in their build settings, and the value is the
same in all child objects, returns 1.
If no child objects have the key in their build settings, returns 0.
If some, but not all, child objects have the key in their build settings,
or if any children have different values for the key, returns -1.
"""
has = None
value = None
for configuration in self._properties['buildConfigurations']:
configuration_has = configuration.HasBuildSetting(key)
if has is None:
has = configuration_has
elif has != configuration_has:
return -1
if configuration_has:
configuration_value = configuration.GetBuildSetting(key)
if value is None:
value = configuration_value
elif value != configuration_value:
return -1
if not has:
return 0
return 1
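# A short sketch of the tri-state behavior above, using a hypothetical
# XCConfigurationList `xcl` with the default Debug/Release configurations:
#   xcl.SetBuildSetting('GCC_OPTIMIZATION_LEVEL', '0')
#   xcl.HasBuildSetting('GCC_OPTIMIZATION_LEVEL')  # 1: set everywhere, same value
#   xcl.HasBuildSetting('OTHER_CFLAGS')            # 0: set nowhere
#   xcl.ConfigurationNamed('Debug').SetBuildSetting('OTHER_CFLAGS', '-g')
#   xcl.HasBuildSetting('OTHER_CFLAGS')            # -1: set in some, but not all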
def GetBuildSetting(self, key):
"""Gets the build setting for key.
All child XCConfiguration objects must have the same value set for the
setting, or a ValueError will be raised.
"""
# TODO(mark): This is wrong for build settings that are lists. The list
# contents should be compared (and a list copy returned?)
value = None
for configuration in self._properties['buildConfigurations']:
configuration_value = configuration.GetBuildSetting(key)
if value is None:
value = configuration_value
else:
if value != configuration_value:
raise ValueError, 'Variant values for ' + key
return value
def SetBuildSetting(self, key, value):
"""Sets the build setting for key to value in all child
XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.SetBuildSetting(key, value)
def AppendBuildSetting(self, key, value):
"""Appends value to the build setting for key, which is treated as a list,
in all child XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.AppendBuildSetting(key, value)
def DelBuildSetting(self, key):
"""Deletes the build setting key from all child XCBuildConfiguration
objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.DelBuildSetting(key)
def SetBaseConfiguration(self, value):
"""Sets the build configuration in all child XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.SetBaseConfiguration(value)
class PBXBuildFile(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'fileRef': [0, XCFileLikeElement, 0, 1],
'settings': [0, str, 0, 0], # hack, it's a dict
})
# Weird output rules for PBXBuildFile.
_should_print_single_line = True
_encode_transforms = XCObject._alternate_encode_transforms
def Name(self):
# Example: "main.cc in Sources"
return self._properties['fileRef'].Name() + ' in ' + self.parent.Name()
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# It is not sufficient to just rely on Name() to get the
# XCFileLikeElement's name, because that is not a complete pathname.
# PathHashables returns hashables unique enough that no two
# PBXBuildFiles should wind up with the same set of hashables, unless
# someone adds the same file multiple times to the same target. That
# would be considered invalid anyway.
hashables.extend(self._properties['fileRef'].PathHashables())
return hashables
class XCBuildPhase(XCObject):
"""Abstract base for build phase classes. Not represented in a project
file.
Attributes:
_files_by_path: A dict mapping each path in the files list (keys) to the
corresponding PBXBuildFile children (values).
_files_by_xcfilelikeelement: A dict mapping each XCFileLikeElement (keys)
to the corresponding PBXBuildFile children (values).
"""
# TODO(mark): Some build phase types, like PBXShellScriptBuildPhase, don't
# actually have a "files" list. XCBuildPhase should not have "files" but
# another abstract subclass of it should provide this, and concrete build
# phase types that do have "files" lists should be derived from that new
# abstract subclass. XCBuildPhase should only provide buildActionMask and
# runOnlyForDeploymentPostprocessing, and not files or the various
# file-related methods and attributes.
_schema = XCObject._schema.copy()
_schema.update({
'buildActionMask': [0, int, 0, 1, 0x7fffffff],
'files': [1, PBXBuildFile, 1, 1, []],
'runOnlyForDeploymentPostprocessing': [0, int, 0, 1, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCObject.__init__(self, properties, id, parent)
self._files_by_path = {}
self._files_by_xcfilelikeelement = {}
for pbxbuildfile in self._properties.get('files', []):
self._AddBuildFileToDicts(pbxbuildfile)
def FileGroup(self, path):
# Subclasses must override this by returning a two-element tuple. The
# first item in the tuple should be the PBXGroup to which "path" should be
# added, either as a child or deeper descendant. The second item should
# be a boolean indicating whether files should be added into hierarchical
# groups or one single flat group.
raise NotImplementedError, \
self.__class__.__name__ + ' must implement FileGroup'
def _AddPathToDict(self, pbxbuildfile, path):
"""Adds path to the dict tracking paths belonging to this build phase.
If the path is already a member of this build phase, raises an exception.
"""
if path in self._files_by_path:
raise ValueError, 'Found multiple build files with path ' + path
self._files_by_path[path] = pbxbuildfile
def _AddBuildFileToDicts(self, pbxbuildfile, path=None):
"""Maintains the _files_by_path and _files_by_xcfilelikeelement dicts.
If path is specified, then it is the path that is being added to the
phase, and pbxbuildfile must contain either a PBXFileReference directly
referencing that path, or it must contain a PBXVariantGroup that itself
contains a PBXFileReference referencing the path.
If path is not specified, either the PBXFileReference's path or the paths
of all children of the PBXVariantGroup are taken as being added to the
phase.
If the path is already present in the phase, raises an exception.
If the PBXFileReference or PBXVariantGroup referenced by pbxbuildfile
are already present in the phase, referenced by a different PBXBuildFile
object, raises an exception. This does not raise an exception when
a PBXFileReference or PBXVariantGroup reappear and are referenced by the
same PBXBuildFile that has already introduced them, because in the case
of PBXVariantGroup objects, they may correspond to multiple paths that are
not all added simultaneously. When this situation occurs, the path needs
to be added to _files_by_path, but nothing needs to change in
_files_by_xcfilelikeelement, and the caller should have avoided adding
the PBXBuildFile if it is already present in the list of children.
"""
xcfilelikeelement = pbxbuildfile._properties['fileRef']
paths = []
if path != None:
# It's best when the caller provides the path.
if isinstance(xcfilelikeelement, PBXVariantGroup):
paths.append(path)
else:
# If the caller didn't provide a path, there can be either multiple
# paths (PBXVariantGroup) or one.
if isinstance(xcfilelikeelement, PBXVariantGroup):
for variant in xcfilelikeelement._properties['children']:
paths.append(variant.FullPath())
else:
paths.append(xcfilelikeelement.FullPath())
# Add the paths first, because if something's going to raise, the
# messages provided by _AddPathToDict are more useful owing to its
# having access to a real pathname and not just an object's Name().
for a_path in paths:
self._AddPathToDict(pbxbuildfile, a_path)
# If another PBXBuildFile references this XCFileLikeElement, there's a
# problem.
if xcfilelikeelement in self._files_by_xcfilelikeelement and \
self._files_by_xcfilelikeelement[xcfilelikeelement] != pbxbuildfile:
raise ValueError, 'Found multiple build files for ' + \
xcfilelikeelement.Name()
self._files_by_xcfilelikeelement[xcfilelikeelement] = pbxbuildfile
def AppendBuildFile(self, pbxbuildfile, path=None):
# Callers should use this instead of calling
# AppendProperty('files', pbxbuildfile) directly because this function
# maintains the object's dicts. Better yet, callers can just call AddFile
# with a pathname and not worry about building their own PBXBuildFile
# objects.
self.AppendProperty('files', pbxbuildfile)
self._AddBuildFileToDicts(pbxbuildfile, path)
def AddFile(self, path, settings=None):
(file_group, hierarchical) = self.FileGroup(path)
file_ref = file_group.AddOrGetFileByPath(path, hierarchical)
if file_ref in self._files_by_xcfilelikeelement and \
isinstance(file_ref, PBXVariantGroup):
# There's already a PBXBuildFile in this phase corresponding to the
# PBXVariantGroup. path just provides a new variant that belongs to
# the group. Add the path to the dict.
pbxbuildfile = self._files_by_xcfilelikeelement[file_ref]
self._AddBuildFileToDicts(pbxbuildfile, path)
else:
# Add a new PBXBuildFile to get file_ref into the phase.
if settings is None:
pbxbuildfile = PBXBuildFile({'fileRef': file_ref})
else:
pbxbuildfile = PBXBuildFile({'fileRef': file_ref, 'settings': settings})
self.AppendBuildFile(pbxbuildfile, path)
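# A hedged usage sketch (hypothetical `phase` object of a concrete build phase
# class such as PBXSourcesBuildPhase, attached to a PBXProject): callers
# normally pass plain paths and let AddFile build the PBXBuildFile objects.
#   phase.AddFile('src/main.cc')
#   phase.AddFile('src/extra.cc', settings={'COMPILER_FLAGS': '-Wextra'})
# AppendBuildFile is only needed when a PBXBuildFile was constructed by hand.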
class PBXHeadersBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Headers'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXResourcesBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Resources'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXSourcesBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Sources'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXFrameworksBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Frameworks'
def FileGroup(self, path):
(root, ext) = posixpath.splitext(path)
if ext != '':
ext = ext[1:].lower()
if ext == 'o':
# .o files are added to Xcode Frameworks phases, but conceptually they aren't
# frameworks; they're more like sources or intermediates. Redirect them
# to show up in one of those other groups.
return self.PBXProjectAncestor().RootGroupForPath(path)
else:
return (self.PBXProjectAncestor().FrameworksGroup(), False)
class PBXShellScriptBuildPhase(XCBuildPhase):
_schema = XCBuildPhase._schema.copy()
_schema.update({
'inputPaths': [1, str, 0, 1, []],
'name': [0, str, 0, 0],
'outputPaths': [1, str, 0, 1, []],
'shellPath': [0, str, 0, 1, '/bin/sh'],
'shellScript': [0, str, 0, 1],
'showEnvVarsInLog': [0, int, 0, 0],
})
def Name(self):
if 'name' in self._properties:
return self._properties['name']
return 'ShellScript'
class PBXCopyFilesBuildPhase(XCBuildPhase):
_schema = XCBuildPhase._schema.copy()
_schema.update({
'dstPath': [0, str, 0, 1],
'dstSubfolderSpec': [0, int, 0, 1],
'name': [0, str, 0, 0],
})
# path_tree_re matches "$(DIR)/path" or just "$(DIR)". Match group 1 is
# "DIR", match group 3 is "path" or None.
path_tree_re = re.compile('^\\$\\((.*)\\)(/(.*)|)$')
# path_tree_to_subfolder maps names of Xcode variables to the associated
# dstSubfolderSpec property value used in a PBXCopyFilesBuildPhase object.
path_tree_to_subfolder = {
'BUILT_PRODUCTS_DIR': 16, # Products Directory
# Other types that can be chosen via the Xcode UI.
# TODO(mark): Map Xcode variable names to these.
# : 1, # Wrapper
# : 6, # Executables
# : 7, # Resources
# : 15, # Java Resources
# : 10, # Frameworks
# : 11, # Shared Frameworks
# : 12, # Shared Support
# : 13, # PlugIns
}
def Name(self):
if 'name' in self._properties:
return self._properties['name']
return 'CopyFiles'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
def SetDestination(self, path):
"""Set the dstSubfolderSpec and dstPath properties from path.
path may be specified in the same notation used for XCHierarchicalElements,
specifically, "$(DIR)/path".
"""
path_tree_match = self.path_tree_re.search(path)
if path_tree_match:
# Everything else needs to be relative to an Xcode variable.
path_tree = path_tree_match.group(1)
relative_path = path_tree_match.group(3)
if path_tree in self.path_tree_to_subfolder:
subfolder = self.path_tree_to_subfolder[path_tree]
if relative_path is None:
relative_path = ''
else:
# The path starts with an unrecognized Xcode variable
# name like $(SRCROOT). Xcode will still handle this
# as an "absolute path" that starts with the variable.
subfolder = 0
relative_path = path
elif path.startswith('/'):
# Special case. Absolute paths are in dstSubfolderSpec 0.
subfolder = 0
relative_path = path[1:]
else:
raise ValueError, 'Can\'t use path %s in a %s' % \
(path, self.__class__.__name__)
self._properties['dstPath'] = relative_path
self._properties['dstSubfolderSpec'] = subfolder
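# Hypothetical examples of how SetDestination maps its argument, following the
# branches above:
#   SetDestination('$(BUILT_PRODUCTS_DIR)/plugins') -> dstSubfolderSpec 16,
#                                                      dstPath 'plugins'
#   SetDestination('$(SRCROOT)/out')                -> dstSubfolderSpec 0,
#                                                      dstPath '$(SRCROOT)/out'
#   SetDestination('/usr/local/lib')                -> dstSubfolderSpec 0,
#                                                      dstPath 'usr/local/lib'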
class PBXBuildRule(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'compilerSpec': [0, str, 0, 1],
'filePatterns': [0, str, 0, 0],
'fileType': [0, str, 0, 1],
'isEditable': [0, int, 0, 1, 1],
'outputFiles': [1, str, 0, 1, []],
'script': [0, str, 0, 0],
})
def Name(self):
# Not very inspired, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.append(self._properties['fileType'])
if 'filePatterns' in self._properties:
hashables.append(self._properties['filePatterns'])
return hashables
class PBXContainerItemProxy(XCObject):
# When referencing an item in this project file, containerPortal is the
# PBXProject root object of this project file. When referencing an item in
# another project file, containerPortal is a PBXFileReference identifying
# the other project file.
#
# When serving as a proxy to an XCTarget (in this project file or another),
# proxyType is 1. When serving as a proxy to a PBXFileReference (in another
# project file), proxyType is 2. Type 2 is used for references to the
# products of the other project file's targets.
#
# Xcode is weird about remoteGlobalIDString. Usually, it's printed without
# a comment, indicating that it's tracked internally simply as a string, but
# sometimes it's printed with a comment (usually when the object is initially
# created), indicating that it's tracked as a project file object at least
# sometimes. This module always tracks it as an object, but contains a hack
# to prevent it from printing the comment in the project file output. See
# _XCKVPrint.
_schema = XCObject._schema.copy()
_schema.update({
'containerPortal': [0, XCContainerPortal, 0, 1],
'proxyType': [0, int, 0, 1],
'remoteGlobalIDString': [0, XCRemoteObject, 0, 1],
'remoteInfo': [0, str, 0, 1],
})
def __repr__(self):
props = self._properties
name = '%s.gyp:%s' % (props['containerPortal'].Name(), props['remoteInfo'])
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Name(self):
# Admittedly not the best name, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.extend(self._properties['containerPortal'].Hashables())
hashables.extend(self._properties['remoteGlobalIDString'].Hashables())
return hashables
class PBXTargetDependency(XCObject):
# The "target" property accepts an XCTarget object, and obviously not
# NoneType. But XCTarget is defined below, so it can't be put into the
# schema yet. The definition of PBXTargetDependency can't be moved below
# XCTarget because XCTarget's own schema references PBXTargetDependency.
# Python doesn't deal well with this circular relationship, and doesn't have
# a real way to do forward declarations. To work around, the type of
# the "target" property is reset below, after XCTarget is defined.
#
# At least one of "name" and "target" is required.
_schema = XCObject._schema.copy()
_schema.update({
'name': [0, str, 0, 0],
'target': [0, None.__class__, 0, 0],
'targetProxy': [0, PBXContainerItemProxy, 1, 1],
})
def __repr__(self):
name = self._properties.get('name') or self._properties['target'].Name()
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Name(self):
# Admittedly not the best name, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.extend(self._properties['targetProxy'].Hashables())
return hashables
class PBXReferenceProxy(XCFileLikeElement):
_schema = XCFileLikeElement._schema.copy()
_schema.update({
'fileType': [0, str, 0, 1],
'path': [0, str, 0, 1],
'remoteRef': [0, PBXContainerItemProxy, 1, 1],
})
class XCTarget(XCRemoteObject):
# An XCTarget is really just an XCObject; the XCRemoteObject thing is just
# to allow PBXProject to be used in the remoteGlobalIDString property of
# PBXContainerItemProxy.
#
# Setting a "name" property at instantiation may also affect "productName",
# which may in turn affect the "PRODUCT_NAME" build setting in children of
# "buildConfigurationList". See __init__ below.
_schema = XCRemoteObject._schema.copy()
_schema.update({
'buildConfigurationList': [0, XCConfigurationList, 1, 1,
XCConfigurationList()],
'buildPhases': [1, XCBuildPhase, 1, 1, []],
'dependencies': [1, PBXTargetDependency, 1, 1, []],
'name': [0, str, 0, 1],
'productName': [0, str, 0, 1],
})
def __init__(self, properties=None, id=None, parent=None,
force_outdir=None, force_prefix=None, force_extension=None):
# super
XCRemoteObject.__init__(self, properties, id, parent)
# Set up additional defaults not expressed in the schema. If a "name"
# property was supplied, set "productName" if it is not present. Also set
# the "PRODUCT_NAME" build setting in each configuration, but only if
# the setting is not present in any build configuration.
if 'name' in self._properties:
if not 'productName' in self._properties:
self.SetProperty('productName', self._properties['name'])
if 'productName' in self._properties:
if 'buildConfigurationList' in self._properties:
configs = self._properties['buildConfigurationList']
if configs.HasBuildSetting('PRODUCT_NAME') == 0:
configs.SetBuildSetting('PRODUCT_NAME',
self._properties['productName'])
def AddDependency(self, other):
pbxproject = self.PBXProjectAncestor()
other_pbxproject = other.PBXProjectAncestor()
if pbxproject == other_pbxproject:
# Add a dependency to another target in the same project file.
container = PBXContainerItemProxy({'containerPortal': pbxproject,
'proxyType': 1,
'remoteGlobalIDString': other,
'remoteInfo': other.Name()})
dependency = PBXTargetDependency({'target': other,
'targetProxy': container})
self.AppendProperty('dependencies', dependency)
else:
# Add a dependency to a target in a different project file.
other_project_ref = \
pbxproject.AddOrGetProjectReference(other_pbxproject)[1]
container = PBXContainerItemProxy({
'containerPortal': other_project_ref,
'proxyType': 1,
'remoteGlobalIDString': other,
'remoteInfo': other.Name(),
})
dependency = PBXTargetDependency({'name': other.Name(),
'targetProxy': container})
self.AppendProperty('dependencies', dependency)
# Proxy all of these through to the build configuration list.
def ConfigurationNamed(self, name):
return self._properties['buildConfigurationList'].ConfigurationNamed(name)
def DefaultConfiguration(self):
return self._properties['buildConfigurationList'].DefaultConfiguration()
def HasBuildSetting(self, key):
return self._properties['buildConfigurationList'].HasBuildSetting(key)
def GetBuildSetting(self, key):
return self._properties['buildConfigurationList'].GetBuildSetting(key)
def SetBuildSetting(self, key, value):
return self._properties['buildConfigurationList'].SetBuildSetting(key, \
value)
def AppendBuildSetting(self, key, value):
return self._properties['buildConfigurationList'].AppendBuildSetting(key, \
value)
def DelBuildSetting(self, key):
return self._properties['buildConfigurationList'].DelBuildSetting(key)
# Redefine the type of the "target" property. See PBXTargetDependency._schema
# above.
PBXTargetDependency._schema['target'][1] = XCTarget
class PBXNativeTarget(XCTarget):
# buildPhases is overridden in the schema to be able to set defaults.
#
# NOTE: Contrary to most objects, it is advisable to set parent when
# constructing PBXNativeTarget. A parent of an XCTarget must be a PBXProject
# object. A parent reference is required for a PBXNativeTarget during
# construction to be able to set up the target defaults for productReference,
# because a PBXBuildFile object must be created for the target and it must
# be added to the PBXProject's mainGroup hierarchy.
_schema = XCTarget._schema.copy()
_schema.update({
'buildPhases': [1, XCBuildPhase, 1, 1,
[PBXSourcesBuildPhase(), PBXFrameworksBuildPhase()]],
'buildRules': [1, PBXBuildRule, 1, 1, []],
'productReference': [0, PBXFileReference, 0, 1],
'productType': [0, str, 0, 1],
})
# Mapping from Xcode product-types to settings. The settings are:
# filetype : used for explicitFileType in the project file
# prefix : the prefix for the file name
# suffix : the suffix for the file name
_product_filetypes = {
'com.apple.product-type.application': ['wrapper.application',
'', '.app'],
'com.apple.product-type.bundle': ['wrapper.cfbundle',
'', '.bundle'],
'com.apple.product-type.framework': ['wrapper.framework',
'', '.framework'],
'com.apple.product-type.library.dynamic': ['compiled.mach-o.dylib',
'lib', '.dylib'],
'com.apple.product-type.library.static': ['archive.ar',
'lib', '.a'],
'com.apple.product-type.tool': ['compiled.mach-o.executable',
'', ''],
'com.apple.product-type.bundle.unit-test': ['wrapper.cfbundle',
'', '.xctest'],
'com.googlecode.gyp.xcode.bundle': ['compiled.mach-o.dylib',
'', '.so'],
}
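# Worked example of how an entry above is applied in __init__ below: a target
# created with productType 'com.apple.product-type.library.static' and
# productName 'event' (hypothetical values) gets a productReference whose
# explicitFileType is 'archive.ar' and whose path is 'libevent.a'
# ('lib' prefix + product name + '.a' suffix).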
def __init__(self, properties=None, id=None, parent=None,
force_outdir=None, force_prefix=None, force_extension=None):
# super
XCTarget.__init__(self, properties, id, parent)
if 'productName' in self._properties and \
'productType' in self._properties and \
not 'productReference' in self._properties and \
self._properties['productType'] in self._product_filetypes:
products_group = None
pbxproject = self.PBXProjectAncestor()
if pbxproject != None:
products_group = pbxproject.ProductsGroup()
if products_group != None:
(filetype, prefix, suffix) = \
self._product_filetypes[self._properties['productType']]
# Xcode does not have a distinct type for loadable modules that are
# pure BSD targets (not in a bundle wrapper). GYP allows such modules
# to be specified by setting a target type to loadable_module without
# having mac_bundle set. These are mapped to the pseudo-product type
# com.googlecode.gyp.xcode.bundle.
#
# By picking up this special type and converting it to a dynamic
# library (com.apple.product-type.library.dynamic) with fix-ups,
# single-file loadable modules can be produced.
#
# MACH_O_TYPE is changed to mh_bundle to produce the proper file type
# (as opposed to mh_dylib). In order for linking to succeed,
# DYLIB_CURRENT_VERSION and DYLIB_COMPATIBILITY_VERSION must be
# cleared. They are meaningless for type mh_bundle.
#
# Finally, the .so extension is forcibly applied over the default
# (.dylib), unless another forced extension is already selected.
# .dylib is plainly wrong, and .bundle is used by loadable_modules in
# bundle wrappers (com.apple.product-type.bundle). .so seems an odd
# choice because it's used as the extension on many other systems that
# don't distinguish between linkable shared libraries and non-linkable
# loadable modules, but there's precedent: Python loadable modules on
# Mac OS X use an .so extension.
if self._properties['productType'] == 'com.googlecode.gyp.xcode.bundle':
self._properties['productType'] = \
'com.apple.product-type.library.dynamic'
self.SetBuildSetting('MACH_O_TYPE', 'mh_bundle')
self.SetBuildSetting('DYLIB_CURRENT_VERSION', '')
self.SetBuildSetting('DYLIB_COMPATIBILITY_VERSION', '')
if force_extension is None:
force_extension = suffix[1:]
if self._properties['productType'] == \
'com.apple.product-type.bundle.unit-test':
if force_extension is None:
force_extension = suffix[1:]
if force_extension is not None:
# If it's a wrapper (bundle), set WRAPPER_EXTENSION.
if filetype.startswith('wrapper.'):
self.SetBuildSetting('WRAPPER_EXTENSION', force_extension)
else:
# Extension override.
suffix = '.' + force_extension
self.SetBuildSetting('EXECUTABLE_EXTENSION', force_extension)
if filetype.startswith('compiled.mach-o.executable'):
product_name = self._properties['productName']
product_name += suffix
suffix = ''
self.SetProperty('productName', product_name)
self.SetBuildSetting('PRODUCT_NAME', product_name)
# Xcode handles most prefixes based on the target type; however, there
# are exceptions. If a "BSD Dynamic Library" target is added in the
# Xcode UI, Xcode sets EXECUTABLE_PREFIX. This check duplicates that
# behavior.
if force_prefix is not None:
prefix = force_prefix
if filetype.startswith('wrapper.'):
self.SetBuildSetting('WRAPPER_PREFIX', prefix)
else:
self.SetBuildSetting('EXECUTABLE_PREFIX', prefix)
if force_outdir is not None:
self.SetBuildSetting('TARGET_BUILD_DIR', force_outdir)
# TODO(tvl): Remove the below hack.
# http://code.google.com/p/gyp/issues/detail?id=122
# Some targets include the prefix in the target_name. These targets
# really should just add a product_name setting that doesn't include
# the prefix. For example:
# target_name = 'libevent', product_name = 'event'
# This check cleans up for them.
product_name = self._properties['productName']
prefix_len = len(prefix)
if prefix_len and (product_name[:prefix_len] == prefix):
product_name = product_name[prefix_len:]
self.SetProperty('productName', product_name)
self.SetBuildSetting('PRODUCT_NAME', product_name)
ref_props = {
'explicitFileType': filetype,
'includeInIndex': 0,
'path': prefix + product_name + suffix,
'sourceTree': 'BUILT_PRODUCTS_DIR',
}
file_ref = PBXFileReference(ref_props)
products_group.AppendChild(file_ref)
self.SetProperty('productReference', file_ref)
def GetBuildPhaseByType(self, type):
if not 'buildPhases' in self._properties:
return None
the_phase = None
for phase in self._properties['buildPhases']:
if isinstance(phase, type):
# Some phases may be present in multiples in a well-formed project file,
# but phases like PBXSourcesBuildPhase may only be present singly, and
# this function is intended for callers that expect at most one phase of
# the given type. Loop
# over the entire list of phases and assert if more than one of the
# desired type is found.
assert the_phase is None
the_phase = phase
return the_phase
def HeadersPhase(self):
headers_phase = self.GetBuildPhaseByType(PBXHeadersBuildPhase)
if headers_phase is None:
| headers_phase = PBXHeadersBuildPhase() | 11,459 | lcc_e | python | null | 02fa887706420d04baac1896a028bd74ff94ed29cd375ad1 |
|
# A collection of tools to remotely access a CATMAID server via its API
#
# Copyright (C) 2017 Philipp Schlegel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
"""This module contains functions to request data from Catmaid server.
Examples
--------
>>> import pymaid
>>> # HTTP_USER and HTTP_PASSWORD are only necessary if your server requires
>>> # HTTP authentication
>>> myInstance = pymaid.CatmaidInstance('www.your.catmaid-server.org',
... 'api_token',
... 'http_user', # Omit if not required
... 'http_password')
>>> # Get skeletal data for two neurons
>>> neuron_list = pymaid.get_neuron(['12345', '67890'], myInstance)
>>> neuron_list[0]
type <class 'pymaid.CatmaidNeuron'>
neuron_name Example neuron name
skeleton_id 12345
n_nodes 9924
n_connectors 437
n_branch_nodes 207
n_end_nodes 214
cable_length 1479.81
review_status NA
annotations False
igraph False
tags True
dtype: object
"""
import datetime
import re
import urllib
import webbrowser
import navis as ns
import numpy as np
import networkx as nx
import pandas as pd
from . import core, utils, config, cache
from navis import in_volume
__all__ = sorted(['get_annotation_details', 'get_annotation_id',
'get_annotation_list', 'get_annotations', 'get_arbor',
'get_connector_details', 'get_connectors',
'get_connector_tags',
'get_contributor_statistics', 'get_edges', 'get_history',
'get_logs', 'get_names', 'get_neuron',
'get_neurons', 'get_neurons_in_bbox',
'get_neurons_in_volume', 'get_node_tags', 'get_node_details',
'get_nodes_in_volume', 'get_partners',
'get_partners_in_volume', 'get_paths', 'get_review',
'get_review_details', 'get_skids_by_annotation',
'get_skids_by_name', 'get_node_info',
'get_node_table', 'get_user_annotations',
'get_user_list', 'get_volume', 'has_soma', 'neuron_exists',
'get_segments',
'get_connectors_between', 'url_to_coordinates',
'get_label_list', 'find_neurons',
'get_skid_from_node', 'get_transactions',
'get_connector_links',
'get_nth_partners', 'find_nodes',
'get_node_location', 'get_annotated',
'get_neuron_id',
'get_connectors_in_bbox',
'get_cable_lengths',
'get_connectivity_counts',
'get_import_info',
'get_origin', 'get_skids_by_origin',
'get_sampler', 'get_sampler_domains', 'get_sampler_counts',
'get_skeleton_change'])
# Set up logging
logger = config.logger
@cache.undo_on_error
def get_neuron(x, with_connectors=True, with_tags=True, with_history=False,
with_merge_history=False, with_abutting=False, return_df=False,
fetch_kwargs={}, init_kwargs={}, raise_missing=True,
remote_instance=None):
"""Retrieve 3D skeleton data as CatmaidNeuron/List.
Parameters
----------
x
Can be either:
1. list of skeleton ID(s), int or str
2. list of neuron name(s), str, exact match
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
with_connectors : bool, optional
If True, will include connector data.
Note: the CATMAID API endpoint currently does not
support retrieving abutting connectors this way.
Please use ``with_abutting=True`` to include
abutting connectors.
with_tags : bool, optional
If True, will include node tags.
with_history: bool, optional
If True, the returned node data will contain
creation date and last modified for each
node.
ATTENTION: if ``with_history=True``, nodes/connectors
that have been moved since their creation will have
multiple entries reflecting their changes in position!
Each state has the date it was modified as creation
date and the next state's date as last modified. The
most up to date state has the original creation date
as last modified.
The creator_id is always the original creator though.
with_abutting: bool, optional
If True, will retrieve abutting connectors.
For some reason they are not part of compact-json, so
they have to be retrieved via a separate API endpoint
-> will show up as connector type 3!
return_df : bool, optional
If True, a ``pandas.DataFrame`` instead of
``CatmaidNeuron``/``CatmaidNeuronList`` is returned.
fetch_kwargs : dict, optional
Above BOOLEAN parameters can also be passed as dict.
This is then used in CatmaidNeuron objects to
override implicitly set parameters!
init_kwargs : dict, optional
Keyword arguments passed when initializing
``CatmaidNeuron``/``CatmaidNeuronList``.
raise_missing : bool, optional
If True and any of the queried neurons can not be
found, raise an exception. Else just log a warning.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
:class:`~pymaid.CatmaidNeuron`
For single neurons.
:class:`~pymaid.CatmaidNeuronList`
For a list of neurons.
pandas.DataFrame
If ``return_df=True``
Notes
-----
The returned objects contain for each neuron::
neuron_name : str
skeleton_id : str
nodes / connectors : pandas.DataFrames containing node/connector
ID, coordinates, parent nodes, etc.
tags : dict containing the node tags:
``{'tag': [node_id, node_id, ...]}``
Dataframe column titles for ``nodes`` and ``connectors`` should be
self-explanatory with the exception of ``relation`` in the connector table.
This column describes the connection ("relation") from the neuron's
node TO the connector::
connectors['relation']
0 = "presynaptic_to" -> this is a presynapse for this neuron
1 = "postsynaptic_to" -> this is a postsynapse for this neuron
2 = "gapjunction_with"
3 = "abutting" (not returned by default)
-1 = other (hypothetical, as CATMAID only returns the above)
Examples
--------
>>> # Get a single neuron by skeleton id
>>> n = pymaid.get_neuron(16)
>>> # Get a bunch of neurons by annotation
>>> n = pymaid.get_neuron('annotation:glomerulus DA1')
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
# Update from kwargs if available
with_tags = fetch_kwargs.get('with_tags', with_tags)
with_connectors = fetch_kwargs.get('with_connectors', with_connectors)
with_history = fetch_kwargs.get('with_history', with_history)
with_merge_history = fetch_kwargs.get('with_merge_history', with_merge_history)
with_abutting = fetch_kwargs.get('with_abutting', with_abutting)
return_df = fetch_kwargs.get('return_df', return_df)
# Generate URLs to retrieve
urls = [remote_instance._get_compact_details_url(s,
with_history=str(with_history).lower(),
with_tags=str(with_tags).lower(),
with_connectors=str(with_connectors).lower(),
with_merge_history=str(with_merge_history).lower()) for s in x]
skdata = remote_instance.fetch(urls, desc='Fetch neurons')
# Retrieve abutting
if with_abutting:
urls = [remote_instance._get_connector_links_url(**{'skeleton_ids[0]': str(s),
'relation_type': 'abutting'}) for s in x]
cn_data = remote_instance.fetch(urls, desc='Fetch abutting cn')
# Add abutting to other connectors in skdata with type == 3
for i, cn in enumerate(cn_data):
if not with_history:
skdata[i][1] += [[c[7], c[1], 3, c[2], c[3], c[4]]
for c in cn['links']]
else:
skdata[i][1] += [[c[7], c[1], 3, c[2], c[3], c[4], c[8], None]
for c in cn['links']]
# Get neuron names
names = get_names(x, remote_instance=remote_instance)
# Parse column names
node_cols = ['node_id', 'parent_id', 'creator_id', 'x', 'y', 'z',
'radius', 'confidence']
cn_cols = ['node_id', 'connector_id', 'type', 'x', 'y', 'z']
if with_history:
node_cols += ['last_modified', 'creation_date', 'still_on_skeleton']
cn_cols += ['last_modified', 'creation_date']
missing = [s for s, d in zip(x, skdata) if not d[0]]
if missing:
msg = 'The following skeleton ID(s) could not be found: {}'.format(', '.join(missing))
if raise_missing:
raise ValueError(msg)
else:
logger.warning(msg)
# Convert data to appropriate dtypes
node_dtypes = {'node_id': np.int32,
'parent_id': np.int32,
'creator_id': 'category',
'x': np.float32,
'y': np.float32,
'z': np.float32,
'radius': np.float32,
'confidence': 'category'}
cn_dtypes = {'node_id': np.int32,
'type': 'category',
'connector_id': np.int32,
'x': np.float32,
'y': np.float32,
'z': np.float32}
def make_node_table(x):
"""Generate node table (incl. correct data types)."""
df = pd.DataFrame(x, columns=node_cols).fillna(-1) # do not remove fillna
return df.astype(node_dtypes)
def make_cn_table(x):
"""Generate connector table (incl. correct data types)."""
df = pd.DataFrame(x, columns=cn_cols)
return df.astype(cn_dtypes)
# Generate DataFrame containing all neurons
df = pd.DataFrame([[names[str(x[i])], # neuron name
str(x[i]), # skeleton ID
make_node_table(n[0]), # nodes
make_cn_table(n[1]), # connectors
n[2] # tags as dictionary
] for i, n in enumerate(skdata) if n[0]],
columns=['neuron_name', 'skeleton_id',
'nodes', 'connectors', 'tags'])
if return_df:
return df
nl = core.CatmaidNeuronList(df, remote_instance=remote_instance, **init_kwargs)
return nl[0] if len(nl) == 1 and len(x) == 1 else nl
# This is for legacy reasons -> will remove eventually
get_neurons = get_neuron
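# A brief usage sketch for the `relation` codes documented in the get_neuron
# docstring (hypothetical skeleton ID; assumes a configured CatmaidInstance):
#   n = get_neuron(16)
#   pre = n.connectors[n.connectors.relation == 0]   # presynapses of this neuron
#   post = n.connectors[n.connectors.relation == 1]  # postsynapses of this neuron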
@cache.undo_on_error
def get_arbor(x, node_flag=1, connector_flag=1, tag_flag=1, remote_instance=None):
"""Retrieve skeleton data for a list of skeleton ids.
Similar to :func:`pymaid.get_neuron` but the connector data includes
the whole chain::
node1 -> (link_confidence) -> connector -> (link_confidence) -> node2
This means that connectors can show up multiple times (i.e. if they have
multiple postsynaptic targets). Does include connector ``x, y, z``
coordinates!
Parameters
----------
x
Neurons to retrieve. Can be either:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
node_flag : 0 | 1, optional
Set if node data should be retrieved.
connector_flag : 0 | 1, optional
Set if connector data should be retrieved.
tag_flag : 0 | 1, optional
Set if tags should be retrieved.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a neuron::
neuron_name skeleton_id nodes connectors tags
0 str str DataFrame DataFrame dict
1
2
Notes
-----
- nodes and connectors are pandas.DataFrames themselves
- tags is a dict: ``{'tag': [node_id, node_id, ...]}``
Dataframe (df) column titles should be self-explanatory with these exceptions:
- ``df['relation_1']`` describes node1 to/from connector
- ``df['relation_2']`` describes node2 to/from connector
- ``relation`` can be: ``0`` (presynaptic), ``1`` (postsynaptic), ``2`` (gap junction)
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
skdata = []
for s in config.tqdm(x, desc='Retrieving arbors', disable=config.pbar_hide,
leave=config.pbar_leave):
# Create URL for retrieving example skeleton from server
remote_compact_arbor_url = remote_instance._get_compact_arbor_url(
s, node_flag, connector_flag, tag_flag)
# Retrieve node_data for example skeleton
arbor_data = remote_instance.fetch(remote_compact_arbor_url)
skdata.append(arbor_data)
logger.debug('%s retrieved' % str(s))
names = get_names(x, remote_instance)
df = pd.DataFrame([[
names[str(x[i])],
str(x[i]),
pd.DataFrame(n[0], columns=['node_id', 'parent_id', 'creator_id',
'x', 'y', 'z', 'radius', 'confidence']),
pd.DataFrame(n[1], columns=['node_1', 'link_confidence',
'connector_id', 'link_confidence',
'node_2', 'other_skeleton_id',
'relation_1', 'relation_2']),
n[2]]
for i, n in enumerate(skdata)
],
columns=['neuron_name', 'skeleton_id', 'nodes', 'connectors', 'tags'],
dtype=object
)
return df
@cache.undo_on_error
def get_partners_in_volume(x, volume, syn_threshold=None, min_size=2,
remote_instance=None):
"""Retrieve the synaptic/gap junction partners within a CATMAID Volume.
Important
---------
Connectivity (total number of connections) returned is restricted to
that volume.
Parameters
----------
x
Neurons to check. Can be either:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
volume : str | list of str | navis.Volume
Name of the CATMAID volume to test OR volume dict with
{'vertices':[],'faces':[]} as returned by e.g.
:func:`~pymaid.get_volume()`.
syn_threshold : int, optional
Synapse threshold. This threshold is applied to the
TOTAL number of synapses across all neurons!
min_size : int, optional
Minimum node count of partner
(default = 2 -> hide single-node partner).
remote_instance : CatmaidInstance
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a neuron and the number of
synapses with the query neurons::
neuron_name skeleton_id num_nodes relation skid1 skid2 ...
1 name1 skid1 node_count1 upstream n_syn n_syn ..
2 name2 skid2 node_count2 downstream n_syn n_syn .
3 name3 skid3 node_count3 gapjunction n_syn n_syn .
- Relation can be: upstream (incoming), downstream (outgoing) of the
neurons of interest or gap junction
- partners can show up multiple times if they are e.g. pre- AND
postsynaptic
- the number of connections between two partners is restricted to the
volume
See Also
--------
:func:`~pymaid.get_neurons_in_volume`
Get all neurons within given volume.
:func:`~pymaid.filter_connectivity`
Filter connectivity table or adjacency matrix by volume(s) or to
parts of neuron(s).
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
# First, get list of connectors
cn_data = get_connectors(x, remote_instance=remote_instance)
# Find out which connectors are in the volume of interest
if isinstance(volume, str):
volume = get_volume(volume, remote_instance=remote_instance)
elif isinstance(volume, (list, np.ndarray)):
for i in range(len(volume)):
if isinstance(volume[i], str):
volume[i] = get_volume(volume[i],
remote_instance=remote_instance)
iv = in_volume(cn_data[['x', 'y', 'z']], volume)
# Get the subset of connectors within the volume
cn_in_volume = cn_data[iv].copy()
logger.info('{} unique connectors in volume. Reconstructing connectivity'
'...'.format(len(cn_in_volume.connector_id.unique())))
# Get details for connectors in volume
cn_details = get_connector_details(cn_in_volume.connector_id.unique(),
remote_instance=remote_instance)
# Filter those connectors that don't have a presynaptic node
cn_details = cn_details[~cn_details.presynaptic_to.isnull()]
# Now reconstruct connectivity table from connector details
# Some connectors may be connected to the same neuron multiple times
# In those cases there will be more node IDs in "postsynaptic_to_node"
# than there are skeleton IDs in "postsynaptic_to". Then we need to map
# node IDs to neurons
mismatch = cn_details[cn_details.postsynaptic_to.apply(
len) < cn_details.postsynaptic_to_node.apply(len)]
match = cn_details[cn_details.postsynaptic_to.apply(
len) >= cn_details.postsynaptic_to_node.apply(len)]
if not mismatch.empty:
logger.info('Retrieving additional details for {0} '
'connectors'.format(mismatch.shape[0]))
tn_to_skid = get_skid_from_node([tn for l in mismatch.postsynaptic_to_node.values for tn in l],
remote_instance=remote_instance)
else:
tn_to_skid = []
# Now collect edges
edges = [[cn.presynaptic_to, skid]
for cn in match.itertuples() for skid in cn.postsynaptic_to]
edges += [[cn.presynaptic_to, tn_to_skid[tn]]
for cn in mismatch.itertuples() for tn in cn.postsynaptic_to_node]
# Turn edges into synaptic connections
unique_edges, counts = np.unique(edges, return_counts=True, axis=0)
unique_skids = np.unique(edges).astype(str)
unique_edges = unique_edges.astype(str)
# Create empty adj_mat
adj_mat = pd.DataFrame(np.zeros((len(unique_skids), len(unique_skids))),
columns=unique_skids, index=unique_skids)
for i, e in enumerate(config.tqdm(unique_edges,
disable=config.pbar_hide,
desc='Adj. matrix',
leave=config.pbar_leave)):
# using .at for scalar label-based assignment speeds things up tremendously!
adj_mat.at[str(e[0]), str(e[1])] = counts[i]
# There is a chance that our original neurons haven't made it through
# filtering (i.e. they don't have partners in the volume). We will simply
# add these rows and columns and set them to 0
missing = [n for n in x if n not in adj_mat.columns]
for n in missing:
adj_mat[n] = 0
missing = [n for n in x if n not in adj_mat.index]
for n in missing:
adj_mat.loc[n] = [0 for i in range(adj_mat.shape[1])]
# Generate connectivity table
all_upstream = adj_mat.T[adj_mat.T[x].sum(axis=1) > 0][x]
all_upstream['skeleton_id'] = all_upstream.index
all_upstream['relation'] = 'upstream'
all_downstream = adj_mat[adj_mat[x].sum(axis=1) > 0][x]
all_downstream['skeleton_id'] = all_downstream.index
all_downstream['relation'] = 'downstream'
# Merge tables
df = pd.concat([all_upstream, all_downstream], axis=0, ignore_index=True)
# We will use this to get name and size of neurons
logger.info('Collecting additional info for {0} neurons'.format(
len(df.skeleton_id.unique())))
review = get_review(df.skeleton_id.unique(),
remote_instance=remote_instance)
num_nodes = review.set_index('skeleton_id').total_node_count.to_dict()
names = get_names(df.skeleton_id.unique(), remote_instance=remote_instance)
df['neuron_name'] = df.skeleton_id.map(names)
df['num_nodes'] = df.skeleton_id.map(num_nodes)
df['total'] = df[x].sum(axis=1)
# Filter for min size
df = df[df.num_nodes >= min_size]
# Filter for synapse threshold
if syn_threshold:
df = df[df.total >= syn_threshold]
# Reorder columns
df = df[['neuron_name', 'skeleton_id', 'num_nodes', 'relation', 'total'] + x]
df.sort_values(['relation', 'total'], inplace=True, ascending=False)
return df.reset_index(drop=True)
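# Usage sketch (hypothetical skeleton IDs and volume name; assumes a
# configured CatmaidInstance):
#   cn = get_partners_in_volume([16, 201], 'LH_R', syn_threshold=3)
#   upstream = cn[cn.relation == 'upstream']      # presynaptic within the volume
#   downstream = cn[cn.relation == 'downstream']  # postsynaptic within the volume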
@cache.undo_on_error
def get_nth_partners(x, n_circles=1, min_pre=2, min_post=2,
remote_instance=None):
"""Retrieve Nth partners.
Partners that are directly (``n_circles = 1``) or via N "hops"
(``n_circles>1``) connected to a set of seed neurons.
Parameters
----------
x
Seed neurons for which to retrieve partners. Can be:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
n_circles : int, optional
Number of circles around your seed neurons.
min_pre/min_post : int, optional
Synapse threshold. Set to -1 to not get any pre-/post
synaptic partners.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame each row represents a partner::
neuron_name skeleton_id
0 name1 123
1 name2 456
2 ... ...
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
url = remote_instance._get_circles_of_hell_url()
post = {'n_circles': n_circles, 'min_pre': min_pre, 'min_post': min_post}
post.update({'skeleton_ids[{}]'.format(i): s for i, s in enumerate(x)})
# Returns list of skids [0] and names dict [1]
resp = remote_instance.fetch(url, post=post)
# If no neurons returned, return empty DataFrame
if resp[1]:
# Generate DataFrame
df = pd.DataFrame.from_dict(resp[1], orient='index').reset_index()
df.columns = ['skeleton_id', 'neuron_name']
else:
df = pd.DataFrame([], columns=['skeleton_id', 'neuron_name'])
return df
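# Usage sketch (hypothetical skeleton ID): all neurons reachable within two
# hops using at least 5 synapses per hop, in either direction:
#   partners = get_nth_partners(16, n_circles=2, min_pre=5, min_post=5)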
@cache.undo_on_error
def get_partners(x, threshold=1, min_size=2, filt=[], min_confidence=1,
directions=['incoming', 'outgoing',
'gapjunctions', 'attachments'],
remote_instance=None):
"""Retrieve partners connected by synapses, gap junctions or attachments.
Note
----
This function treats multiple fragments with the same skeleton ID
(e.g. from splits into axon & dendrites) as a single neuron when fetching
data from the server. For "fragmented" connectivity use
:func:`~pymaid.cn_table_from_connectors` instead.
Parameters
----------
x
Neurons for which to retrieve partners. Can be either:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
threshold : int, optional
Minimum # of links (synapses/gap-junctions/etc).
min_size : int, optional
Minimum node count of partner
(default=2 to hide single-node partners).
filt : list of str, optional
Filters partners for neuron names (must be exact) or
skeleton_ids.
min_confidence : int | None, optional
If set, edges with lower confidence will be ignored.
Applied before ``threshold``.
directions : 'incoming' | 'outgoing' | 'gapjunctions' | 'attachments', optional
Use to restrict to either up- or downstream partners.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a neuron and the number of
synapses with the query neurons::
neuron_name skeleton_id num_nodes relation total skid1 skid2 ...
0 name1 skid1 node_count1 upstream n_syn n_syn ...
1 name2 skid2 node_count2 downstream n_syn n_syn ..
2 name3 skid3 node_count3 gapjunction n_syn n_syn .
...
``relation`` can be ``'upstream'`` (incoming), ``'downstream'``
(outgoing), ``'attachment'`` or ``'gapjunction'`` (gap junction).
Warning
-------
By default, will exclude single node partners! Set ``min_size=1`` to return
ALL partners including placeholder nodes.
Notes
-----
Partners can show up multiple times if they are e.g. pre- AND postsynaptic!
Examples
--------
>>> example_skids = [16, 201, 150, 20]
>>> cn = pymaid.get_partners(example_skids)
>>> # Get only upstream partners
>>> subset = cn[ cn.relation == 'upstream' ]
>>> # Get partners with more than e.g. 5 synapses across all neurons
>>> subset2 = cn[ cn[example_skids].sum(axis=1) > 5 ]
>>> # Combine above conditions (watch parentheses!)
>>> subset3 = cn[(cn.relation=='upstream') &
... (cn[example_skids].sum(axis=1) > 5)]
See Also
--------
:func:`~pymaid.adjacency_matrix`
Use if you need an adjacency matrix instead of a table.
:func:`~pymaid.get_partners_in_volume`
Use if you only want connectivity within a given volume.
:func:`~pymaid.filter_connectivity`
Use to restrict connector table to given part of a neuron
or a volume.
:func:`~cn_table_from_connectors`
Returns "fragmented" connectivity. Use e.g. if you are
working with multiple fragments from the same neuron.
"""
if not isinstance(min_confidence, (float, int)) or min_confidence < 0 or min_confidence > 5:
raise ValueError('min_confidence must be 0-5.')
# This maps CATMAID JSON relations to more relatable terms (I think)
relations = {'incoming': 'upstream',
'outgoing': 'downstream',
'gapjunctions': 'gapjunction',
'attachments': 'attachment'}
# Catch some easy mistakes regarding relations:
repl = {v: k for k, v in relations.items()}
directions = [repl.get(d, d) for d in directions]
wrong_dir = set(directions) - set(relations.keys())
if wrong_dir:
raise ValueError('Unknown direction "{}". Please use a combination '
'of "{}"'.format(', '.join(wrong_dir),
', '.join(relations.keys())))
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
x = np.array(x).astype(str)
remote_connectivity_url = remote_instance._get_connectivity_url()
connectivity_post = {}
connectivity_post['boolean_op'] = 'OR'
connectivity_post['with_nodes'] = False
for i, skid in enumerate(x):
tag = 'source_skeleton_ids[{0}]'.format(i)
connectivity_post[tag] = skid
logger.info('Fetching connectivity table for {} neurons'.format(len(x)))
connectivity_data = remote_instance.fetch(remote_connectivity_url,
post=connectivity_post)
# Delete directions that we don't want
connectivity_data.update(
{d: [] for d in connectivity_data if d not in directions})
# Get neurons' names
names = get_names([n for d in connectivity_data for n in connectivity_data[
d]] + list(x), remote_instance=remote_instance)
df = pd.DataFrame(columns=['neuron_name', 'skeleton_id',
'num_nodes', 'relation'] + list(x))
# Number of synapses is returned as list of links with 0-5 confidence:
# {'skid': [0, 1, 2, 3, 4, 5]}
# This is being collapsed into a single value before returning it.
for d in relations:
if d not in connectivity_data:
continue
df_temp = pd.DataFrame([[
names[str(n)],
str(n),
int(connectivity_data[d][n]['num_nodes']),
relations[d]] +
[sum(connectivity_data[d][n]['skids'].get(s,
[0, 0, 0, 0, 0])[min_confidence - 1:]) for s in x]
for i, n in enumerate(connectivity_data[d])
],
columns=['neuron_name', 'skeleton_id', 'num_nodes',
'relation'] + [str(s) for s in x],
dtype=object
)
df = pd.concat([df, df_temp], axis=0)
df['total'] = df[x].sum(axis=1).values
# Now filter for synapse threshold and size
df = df[(df.num_nodes >= min_size) & (df.total >= threshold)]
df.sort_values(['relation', 'total'], inplace=True, ascending=False)
if filt:
if not isinstance(filt, (list, np.ndarray)):
filt = [filt]
filt = [str(s) for s in filt]
df = df[df.skeleton_id.isin(filt) | df.neuron_name.isin(filt)]
df.datatype = 'connectivity_table'
# Return reindexed concatenated dataframe
df.reset_index(drop=True, inplace=True)
logger.info('Done. Found {0} pre-, {1} postsynaptic and {2} gap '
'junction-connected neurons'.format(
*[df[df.relation == r].shape[0] for r in ['upstream',
'downstream',
'gapjunction']]))
return df
@cache.undo_on_error
def get_names(x, remote_instance=None):
"""Retrieve neuron names for a list of skeleton ids.
Parameters
----------
x
Neurons for which to retrieve names. Can be either:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
remote_instance : CatmaidInstance, optional
Either pass directly to function or define
globally as ``remote_instance``.
Returns
-------
dict
``{skid1: 'neuron_name', skid2: 'neuron_name', ...}``
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
x = list(set(x))
remote_get_names_url = remote_instance._get_neuronnames()
get_names_postdata = {}
get_names_postdata['self.project_id'] = remote_instance.project_id
for i in range(len(x)):
key = 'skids[%i]' % i
get_names_postdata[key] = x[i]
names = remote_instance.fetch(remote_get_names_url, post=get_names_postdata)
logger.debug('Names for {} of {} skeleton IDs retrieved'.format(len(names),
len(x)))
return names
@cache.undo_on_error
def get_node_details(x, chunk_size=10000, convert_ts=True, remote_instance=None):
"""Retrieve detailed info for nodes and/or connectors.
Parameters
----------
x : list | CatmaidNeuron | CatmaidNeuronList
List of node ids: can be node or connector IDs!
If CatmaidNeuron/List will get both, nodes and
connectors!
chunk_size : int, optional
Querying a large number of nodes can result in server
errors. We will thus query them in chunks of this size.
convert_ts : bool, optional
If True, will convert timestamps from strings to
datetime objects.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a node::
node_id creation_time creator edition_time ...
0
1
editor reviewers review_times
0
1
"""
if isinstance(x, (core.CatmaidNeuron, core.CatmaidNeuronList)):
node_ids = np.append(x.nodes.node_id.values,
x.connectors.connector_id.values)
elif not isinstance(x, (list, tuple, np.ndarray)):
node_ids = [x]
else:
node_ids = x
remote_instance = utils._eval_remote_instance(remote_instance)
logger.debug('Retrieving details for {} nodes...'.format(len(node_ids)))
urls = []
post = []
for ix in range(0, len(node_ids), chunk_size):
urls.append(remote_instance._get_node_info_url())
post.append({'node_ids[{}]'.format(k): tn for k, tn in enumerate(node_ids[ix:ix + chunk_size])})
# Get responses
resp = remote_instance.fetch(urls, post=post, desc='Chunks')
# Merge into a single dictionary
data = {k: d[k] for d in resp for k in d}
# Generate dataframe
data_columns = ['creation_time', 'user', 'edition_time',
'editor', 'reviewers', 'review_times']
df = pd.DataFrame(
[[e] + [d[k] for k in data_columns] for e, d in data.items()],
columns=['node_id'] + data_columns,
dtype=object
)
# Rename column 'user' to 'creator'
df.rename({'user': 'creator'}, axis='columns', inplace=True)
if convert_ts:
df['creation_time'] = pd.to_datetime(df.creation_time)
df['edition_time'] = pd.to_datetime(df.edition_time)
df['review_times'] = df.review_times.apply(lambda x: [pd.to_datetime(d)
for d in x])
return df
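# Usage sketch (hypothetical skeleton ID): fetch node/connector details for a
# whole neuron and pick out nodes that have at least one reviewer:
#   n = get_neuron(16)
#   details = get_node_details(n)
#   reviewed = details[details.reviewers.apply(len) > 0]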
@cache.undo_on_error
def get_skid_from_node(node_ids, remote_instance=None):
"""Retrieve skeleton IDs from a list of nodes.
Parameters
----------
node_ids : int | list of int
Node ID(s) to retrieve skeleton IDs for.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
dict
``{node_id: skeleton_ID, ...}``. If a node does not exist,
``skeleton_ID`` will be ``None``.
"""
remote_instance = utils._eval_remote_instance(remote_instance)
node_ids = utils.eval_node_ids(node_ids, connectors=False, nodes=True)
if not isinstance(node_ids, (list, np.ndarray)):
node_ids = [node_ids]
urls = [remote_instance._get_skid_from_tnid(tn) for tn in node_ids]
data = remote_instance.fetch(urls, desc='Fetch skids')
return {node_ids[i]: d.get('skeleton_id', None) for i, d in enumerate(data)}
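# Usage sketch (hypothetical node and skeleton IDs):
#   get_skid_from_node([123456, 123457])
#   # -> {123456: 16, 123457: 16}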
@cache.undo_on_error
def get_node_table(x, include_details=True, convert_ts=True, remote_instance=None):
"""Retrieve node table(s) for a list of neurons.
Parameters
----------
x
Catmaid Neuron(s) as single or list of either:
1. skeleton IDs (int or str)
2. neuron name (str, exact match)
3. annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
include_details : bool, optional
If True, tags and reviewer are included in the table.
For larger lists, it is recommended to set this to
False to improve performance.
convert_ts : bool, optional
If True, will convert edition timestamp to pandas
datetime. Set to False to improve performance.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a node::
skeleton_id node_id parent_id confidence x y z ...
0
1
2
...
radius creator last_edited reviewers tags
0
1
2
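Examples
--------
Get the node table for a single neuron and look at its tagged nodes
(the skeleton ID is a placeholder):
>>> nodes = pymaid.get_node_table(16)
>>> nodes[~nodes.tags.isnull()].head()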
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
logger.info('Retrieving {} node table(s)...'.format(len(x)))
user_list = get_user_list(remote_instance=remote_instance)
user_dict = user_list.set_index('id').login.to_dict()
# Generate URLs to retrieve
urls = []
for skid in x:
remote_nodes_list_url = remote_instance._get_skeleton_nodes_url(skid)
urls.append(remote_nodes_list_url)
node_list = remote_instance.fetch(urls, desc='Get tables')
logger.info('{} nodes retrieved. Creating table..'
'.'.format(sum([len(nl[0]) for nl in node_list])))
all_tables = []
for i, nl in enumerate(config.tqdm(node_list,
desc='Creating table',
leave=config.pbar_leave,
disable=config.pbar_hide)):
this_df = pd.DataFrame(nl[0],
columns=['node_id', 'parent_node_id',
'confidence', 'x', 'y', 'z', 'radius',
'creator', 'last_edited']
)
# Parent IDs can be `None` here - we will set them to -1
this_df.loc[this_df.parent_node_id.isnull(), 'parent_node_id'] = -1
this_df['parent_node_id'] = this_df.parent_node_id.astype(int)
# Keep track of skeleton ID
this_df['skeleton_id'] = x[i]
if include_details:
tag_dict = {}
for t in nl[2]:
tag_dict[t[0]] = tag_dict.get(t[0], []) + [t[1]]
reviewer_dict = {}
for r in nl[1]:
reviewer_dict[r[0]] = reviewer_dict.get(r[0], []) + [user_dict.get(r[1])]
this_df['reviewers'] = this_df.node_id.map(reviewer_dict)
this_df['tags'] = this_df.node_id.map(tag_dict)
all_tables.append(this_df)
# Concatenate all DataFrames
tn_table = pd.concat(all_tables, axis=0, ignore_index=True)
# Replace creator_id with their login and make it a categorical
tn_table['creator'] = tn_table.creator.map(user_dict).astype('category')
# Replace timestamp with datetime object
if convert_ts:
tn_table['last_edited'] = pd.to_datetime(tn_table.last_edited,
utc=True,
unit='s')
return tn_table
@cache.undo_on_error
def get_edges(x, remote_instance=None):
"""Retrieve edges between sets of neurons.
Synaptic connections only!
Parameters
----------
x
Neurons for which to retrieve edges. Can be either:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents an edge::
source target weight
1
2
3
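Examples
--------
Get edges among a set of neurons and pivot them into an adjacency matrix
(the skeleton IDs are placeholders):
>>> edges = pymaid.get_edges([16, 17, 18])
>>> adj = edges.pivot(index='source', columns='target', values='weight')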
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
remote_get_edges_url = remote_instance._get_edges_url()
get_edges_postdata = {}
get_edges_postdata['confidence_threshold'] = '0'
for i in range(len(x)):
key = 'skeleton_ids[%i]' % i
get_edges_postdata[key] = x[i]
edges = remote_instance.fetch(remote_get_edges_url, post=get_edges_postdata)
df = pd.DataFrame([[e[0], e[1], sum(e[2])] for e in edges['edges']],
columns=['source', 'target', 'weight']
)
return df
@cache.undo_on_error
def get_connectors(x, relation_type=None, tags=None, remote_instance=None):
"""Retrieve connectors based on a set of filters.
Parameters
----------
x
Neurons for which to retrieve connectors. Can be either:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
5. ``None`` if you want to fetch all connectors that
match the other criteria
relation_type : 'presynaptic_to' | 'postsynaptic_to' | 'gapjunction_with' | 'abutting' | 'attached_to', optional
If provided, will filter for these connection types.
tags : str | list of str, optional
If provided, will filter connectors for tag(s).
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a connector::
connector_id x y z confidence creator ...
0
1
...
editor creation_time edition_time
0
1
...
Examples
--------
Get all connectors for a single neuron:
>>> cn = pymaid.get_connectors(16)
Get every connector with a given tag:
>>> tagged_cn = pymaid.get_connectors(None, tags='FML_sample')
Get all tagged connectors for a set of neurons:
>>> tagged_cn2 = pymaid.get_connectors('annotation:glomerulus DA1',
tags='FML_sample')
See Also
--------
:func:`~pymaid.get_connector_details`
If you need details about the connectivity of a connector
:func:`~pymaid.get_connectors_between`
If you need to find the connectors between sets of neurons.
:func:`~pymaid.get_connector_links`
If you need details about links for each connector.
:func:`pymaid.find_nodes`
Function to get nodes by tags, IDs or skeleton.
"""
remote_instance = utils._eval_remote_instance(remote_instance)
if not isinstance(x, type(None)):
x = utils.eval_skids(x, remote_instance=remote_instance)
remote_get_connectors_url = remote_instance._get_connectors_url()
postdata = {'with_tags': 'true', 'with_partners': 'true'}
# Add skeleton IDs filter (if applicable)
if not isinstance(x, type(None)):
postdata.update(
{'skeleton_ids[{0}]'.format(i): s for i, s in enumerate(x)})
# Add tags filter (if applicable)
if not isinstance(tags, type(None)):
if not isinstance(tags, (list, np.ndarray)):
tags = [tags]
postdata.update({'tags[{0}]'.format(i): str(t)
for i, t in enumerate(tags)})
# Add relation_type filter (if applicable)
allowed_relations = ['presynaptic_to', 'postsynaptic_to',
'gapjunction_with', 'abutting', 'attached_to']
if not isinstance(relation_type, type(None)):
if relation_type not in allowed_relations:
raise ValueError('Unknown relation type "{0}". Must be in '
'{1}'.format(relation_type, allowed_relations))
postdata.update({'relation_type': relation_type})
data = remote_instance.fetch(remote_get_connectors_url, post=postdata)
# creator_id and editor_id will be replaced with logins later
df = pd.DataFrame(data=data['connectors'],
columns=['connector_id', 'x', 'y', 'z', 'confidence',
'creator_id', 'editor_id', 'creation_time',
'edition_time'])
# Add tags
df['tags'] = df.connector_id.astype(str).map(data['tags'])
# Map hardwire connector type ID to their type name
# ATTENTION: "attachment" can be part of any connector type
rel_ids = {r['relation_id']: r for r in config.link_types}
# Get connector type IDs
cn_ids = {k: v[0][3] for k, v in data['partners'].items()}
# Map type ID to relation (also note conversion of connector ID to integer)
cn_type = {int(k): rel_ids.get(v, {'type': 'unknown'})['type']
for k, v in cn_ids.items()}
# Map connector ID to connector type
df['type'] = df.connector_id.map(cn_type)
# Add creator login instead of id
user_list = get_user_list(remote_instance=remote_instance)
user_dict = user_list.set_index('id').login.to_dict()
df['creator'] = df.creator_id.map(user_dict)
df['editor'] = df.editor_id.map(user_dict)
df.drop(['creator_id', 'editor_id'], inplace=True, axis=1)
# Convert timestamps to datetimes
df['creation_time'] = df['creation_time'].apply(
datetime.datetime.fromtimestamp)
df['edition_time'] = df['edition_time'].apply(
datetime.datetime.fromtimestamp)
df.datatype = 'connector_table'
return df
@cache.undo_on_error
def get_connector_links(x, with_tags=False, chunk_size=50,
remote_instance=None):
"""Retrieve connectors links for a set of neurons.
In essence, this will get you all "arrows" that point from a connector to
your neuron or from your neuron to a connector. It does NOT give you the
full complement of partners for each connector. For that you have
to use :func:`~pymaid.get_connector_details`.
Parameters
----------
x : int | CatmaidNeuron | CatmaidNeuronList
Neurons/Skeleton IDs to retrieve link details for. If
CatmaidNeuron/List will respect changes made to
original neurons (e.g. pruning)!
with_tags : bool, optional
If True, will also return a dictionary of connector tags.
chunk_size : int, optional
Neurons are split into chunks of this size and then
queried sequentially to prevent server from returning
an error.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a connector link::
skeleton_id relation connector_id x y z confidence ...
0
1
2
...
creator node_id creation_time edition_time
0
1
2
(links, tags)
If ``with_tags=True``, will return above DataFrame and tags dict.
See Also
--------
:func:`~pymaid.get_connectors`
If you just need the connector table (ID, x, y, z, creator, etc).
:func:`~pymaid.get_connector_details`
Get the same data but by connector, not by link.
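Examples
--------
Count presynaptic links per skeleton for an annotated set of neurons
(the annotation is a placeholder):
>>> links = pymaid.get_connector_links('annotation:glomerulus DA1')
>>> pre = links[links.relation == 'presynaptic_to']
>>> pre.groupby('skeleton_id').connector_id.count()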
"""
remote_instance = utils._eval_remote_instance(remote_instance)
skids = utils.eval_skids(x, warn_duplicates=False,
remote_instance=remote_instance)
df_collection = []
tags = {}
link_types = [l['relation'] for l in config.link_types]
with config.tqdm(desc='Fetching links', total=len(skids),
disable=config.pbar_hide,
leave=config.pbar_leave) as pbar:
for chunk in [skids[i:i + chunk_size] for i in range(0, len(skids), chunk_size)]:
# Generate URLs
GET = {'skeleton_ids[{}]'.format(i): s for i, s in enumerate(chunk)}
urls = [remote_instance._get_connector_links_url(relation_type=cn,
**GET) for cn in link_types]
# Fetch data
responses = remote_instance.fetch(urls, disable_pbar=True)
# Extract tags
if with_tags:
for r in responses:
tags.update(r['tags'])
# Generate separate DataFrames
data = [pd.DataFrame(r['links'],
columns=['skeleton_id', 'connector_id',
'x', 'y', 'z', 'confidence',
'creator', 'node_id',
'creation_time', 'edition_time']
) for r in responses]
# Add link type to each DataFrame
for t, d in zip(link_types, data):
d['relation'] = t
# Concatenate DataFrames
df = pd.concat(data, axis=0)
# Store
df_collection.append(df)
# Update progress bar
pbar.update(len(chunk))
# Merge DataFrames
df = pd.concat(df_collection, axis=0)
# Cater for cases in which the original neurons have been edited
if isinstance(x, (core.CatmaidNeuron, core.CatmaidNeuronList)):
df = df[df.connector_id.isin(x.connectors.connector_id)]
# Convert to timestamps
df['creation_time'] = pd.to_datetime(df.creation_time)
df['edition_time'] = pd.to_datetime(df.edition_time)
if with_tags:
return df, tags
return df
@cache.undo_on_error
def get_connector_details(x, remote_instance=None):
"""Retrieve details on sets of connectors.
Parameters
----------
x : list of connector IDs | CatmaidNeuron | CatmaidNeuronList
Connector ID(s) to retrieve details for. If
CatmaidNeuron/List, will use their connectors.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a connector::
connector_id presynaptic_to postsynaptic_to ...
0
1
2
...
presynaptic_to_node postsynaptic_to_node
0
1
2
See Also
--------
:func:`~pymaid.get_connectors`
If you just need the connector table (ID, x, y, z, creator, etc).
:func:`~pymaid.get_connector_links`
Get the same data but by link, not by connector.
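Examples
--------
Get details for a couple of connectors and count their postsynaptic
partners (the connector IDs are placeholders):
>>> details = pymaid.get_connector_details([123456, 123457])
>>> details.postsynaptic_to_node.apply(len)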
"""
remote_instance = utils._eval_remote_instance(remote_instance)
connector_ids = utils.eval_node_ids(x, connectors=True, nodes=False)
connector_ids = list(set(connector_ids))
remote_get_connectors_url = remote_instance._get_connector_details_url()
# Depending on DATA_UPLOAD_MAX_NUMBER_FIELDS of your CATMAID server
# (default = 1000), we have to cut requests into batches smaller than that
DATA_UPLOAD_MAX_NUMBER_FIELDS = min(50000, len(connector_ids))
connectors = []
with config.tqdm(total=len(connector_ids), desc='CN details',
disable=config.pbar_hide,
leave=config.pbar_leave) as pbar:
for b in range(0, len(connector_ids), DATA_UPLOAD_MAX_NUMBER_FIELDS):
get_connectors_postdata = {}
for i, s in enumerate(connector_ids[b:b + DATA_UPLOAD_MAX_NUMBER_FIELDS]):
key = 'connector_ids[%i]' % i
get_connectors_postdata[key] = s # connector_ids[i]
connectors += remote_instance.fetch(remote_get_connectors_url,
post=get_connectors_postdata)
pbar.update(DATA_UPLOAD_MAX_NUMBER_FIELDS)
logger.info('Data for %i of %i unique connector IDs retrieved' % (
len(connectors), len(set(connector_ids))))
columns = ['connector_id', 'presynaptic_to', 'postsynaptic_to',
'presynaptic_to_node', 'postsynaptic_to_node']
df = pd.DataFrame([[cn[0]] + [cn[1][e] for e in columns[1:]] for cn in connectors],
columns=columns,
dtype=object
)
return df
@cache.undo_on_error
def get_connector_tags(x, remote_instance=None):
"""Retrieve tags on sets of connectors.
Parameters
----------
x : list of connector IDs | CatmaidNeuron | CatmaidNeuronList
Connector ID(s) to retrieve details for. If
CatmaidNeuron/List, will use their connectors.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
---------
dict
Dictionary mapping tags (``str``) to connector IDs
(``int``)::
{
tag1: [connector1_id, connector2_id, ...],
tag2: [ ... ], ...
}
"""
remote_instance = utils._eval_remote_instance(remote_instance)
connector_ids = utils.eval_node_ids(x, connectors=True, nodes=False)
connector_ids = list(set(connector_ids))
remote_get_node_labels_url = remote_instance._get_node_labels_url()
post = {'connector_ids': ','.join([str(tn) for tn in connector_ids])}
resp = remote_instance.fetch(remote_get_node_labels_url, post=post)
cn_tags = {}
for cnid in resp:
cn_tags.update({tag: cn_tags.get(tag, []) + [int(cnid)] for tag in resp[cnid]})
return cn_tags
@cache.undo_on_error
def get_connectors_between(a, b, directional=True, remote_instance=None):
"""Retrieve connectors between sets of neurons.
Important
---------
This function does currently *not* return gap junctions between neurons.
Notes
-----
A connector can show up multiple times if it is connecting to more than one
node of the same neuron.
Parameters
----------
a,b
Neurons for which to retrieve connectors. Can be:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
directional : bool, optional
If True, only connectors a -> b are listed,
otherwise it is a <-> b.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a connector::
connector_id connector_loc node1_id source_neuron ...
0
1
2
...
confidence1 creator1 node1_loc node2_id target_neuron ...
0
1
2
...
confidence2 creator2 node2_loc
0
1
2
See Also
--------
:func:`~pymaid.get_edges`
If you just need the number of synapses between neurons, this is much
faster.
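Examples
--------
Get connectors from one neuron onto another (the skeleton IDs are
placeholders):
>>> cn = pymaid.get_connectors_between(16, 2333007)
>>> len(cn)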
"""
remote_instance = utils._eval_remote_instance(remote_instance)
a = utils.eval_skids(a, remote_instance=remote_instance)
b = utils.eval_skids(b, remote_instance=remote_instance)
if len(a) == 0:
raise ValueError('No source neurons provided')
if len(b) == 0:
raise ValueError('No target neurons provided')
post = {'relation': 'presynaptic_to'}
post.update({'skids1[{0}]'.format(i): s for i, s in enumerate(a)})
post.update({'skids2[{0}]'.format(i): s for i, s in enumerate(b)})
url = remote_instance._get_connectors_between_url()
data = remote_instance.fetch(url, post=post)
if not directional:
post['relation'] = 'postsynaptic_to'
data += remote_instance.fetch(url, post=post)
df = pd.DataFrame(data,
columns=['connector_id', 'connector_loc', 'node1_id',
'source_neuron', 'confidence1', 'creator1',
'node1_loc', 'node2_id',
'target_neuron', 'confidence2', 'creator2',
'node2_loc'])
# Get user list and replace IDs with logins
user_list = get_user_list(remote_instance=remote_instance)
user_dict = user_list.set_index('id').login.to_dict()
df['creator1'] = df['creator1'].map(user_dict)
df['creator2'] = df['creator2'].map(user_dict)
return df
@cache.undo_on_error
def get_review(x, remote_instance=None):
"""Retrieve review status for a set of neurons.
Parameters
----------
x
Neurons for which to get review status. Can be either:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a neuron::
skeleton_id neuron_name total_node_count nodes_reviewed ...
0
1
...
percent_reviewed
0
1
...
See Also
--------
:func:`~pymaid.get_review_details`
Gives you review status for individual nodes of a given neuron.
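Examples
--------
Check review status for an annotated set of neurons (the annotation is
a placeholder):
>>> rev = pymaid.get_review('annotation:glomerulus DA1')
>>> rev.sort_values('percent_reviewed', ascending=False).head()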
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
remote_get_reviews_url = remote_instance._get_review_status_url()
names = {}
review_status = {}
CHUNK_SIZE = 1000
with config.tqdm(total=len(x), disable=config.pbar_hide,
desc='Rev. status',
leave=config.pbar_leave) as pbar:
for j in range(0, len(x), CHUNK_SIZE):
get_review_postdata = {}
for i in range(j, min(j + CHUNK_SIZE, len(x))):
key = 'skeleton_ids[%i]' % i
get_review_postdata[key] = str(x[i])
names.update(get_names(x[j:j + CHUNK_SIZE],
remote_instance=remote_instance))
review_status.update(remote_instance.fetch(remote_get_reviews_url,
post=get_review_postdata))
pbar.update(CHUNK_SIZE)
df = pd.DataFrame([[s,
names[str(s)],
review_status[s][0],
review_status[s][1],
int(review_status[s][1] / review_status[s][0] * 100)
] for s in review_status],
columns=['skeleton_id', 'neuron_name',
'total_node_count', 'nodes_reviewed',
'percent_reviewed']
)
return df
@cache.undo_on_error
def get_user_annotations(x, remote_instance=None):
"""Retrieve annotations used by given user(s).
Parameters
----------
x
User(s) to get annotation for. Can be either:
1. single or list of user IDs
2. single or list of user login names
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame (df) in which each row represents a single annotation::
annotation annotated_on times_used user_id annotation_id user_login
0
1
...
"""
remote_instance = utils._eval_remote_instance(remote_instance)
if not isinstance(x, (list, np.ndarray)):
x = [x]
# Get user list
user_list = get_user_list(remote_instance=remote_instance)
user_dict = user_list.set_index('id').login.to_dict()
try:
ids = [int(e) for e in x]
except BaseException:
ids = user_list.set_index('login').loc[x, 'id'].values
# This works with neuron_id NOT skeleton_id
# neuron_id can be requested via neuron_names
url_list = list()
postdata = list()
iDisplayLength = 500
for u in ids:
url_list.append(remote_instance._get_annotation_table_url())
postdata.append(dict(user_id=int(u),
iDisplayLength=iDisplayLength))
# Get data
annotations = [e['aaData'] for e in remote_instance.fetch(
url_list, post=postdata, desc='Get annot')]
# Add user login
for i, u in enumerate(ids):
for an in annotations[i]:
an.append(user_dict.get(u, 'Anonymous'))
# Now flatten the list of lists
annotations = [an for sublist in annotations for an in sublist]
# Create dataframe
df = pd.DataFrame(annotations,
columns=['annotation', 'annotated_on', 'times_used',
'user_id', 'annotation_id', 'user_login'],
dtype=object
)
df['annotated_on'] = [datetime.datetime.strptime(
d[:16], '%Y-%m-%dT%H:%M') for d in df['annotated_on'].values]
return df.sort_values('times_used').reset_index(drop=True)
@cache.undo_on_error
def get_annotation_details(x, remote_instance=None):
"""Retrieve annotations for a set of neuron.
Returns more details than :func:`~pymaid.get_annotations` but is slower.
Contains timestamps and user IDs (same API as neuron navigator).
Parameters
----------
x
Neurons to get annotation details for. Can be either:
1. List of skeleton ID(s) (int or str)
2. List of neuron name(s) (str, exact match)
3. An annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a single annotation::
annotation skeleton_id time_annotated user_id annotation_id user
0
1
...
See Also
--------
:func:`~pymaid.get_annotations`
Gives you annotations for a list of neurons (faster).
Examples
--------
>>> # Get annotations for a set of neurons
>>> an = pymaid.get_annotation_details([ 12, 57003 ])
>>> # Get those for a single neuron
>>> an[ an.skeleton_id == '57003' ]
>>> # Get annotations given by set of users
>>> an[ an.user.isin( ['schlegelp', 'lif'] )]
>>> # Get most recent annotations
>>> import datetime
>>> an[ an.time_annotated > datetime.date(2017, 6, 1) ]
"""
remote_instance = utils._eval_remote_instance(remote_instance)
skids = utils.eval_skids(x, remote_instance=remote_instance)
# This works with neuron_id NOT skeleton_id
# neuron_id can be requested via neuron_names
url_list = list()
postdata = list()
neuron_ids = get_neuron_id(skids, remote_instance=remote_instance)
for s in skids:
nid = neuron_ids.get(str(s))
url_list.append(remote_instance._get_annotation_table_url())
postdata.append(dict(neuron_id=int(nid)))
# Get data
annotations = [e['aaData'] for e in remote_instance.fetch(url_list,
post=postdata,
desc='Get annot')]
# Get user list
user_list = get_user_list(remote_instance=remote_instance)
user_dict = user_list.set_index('id').login.to_dict()
# Add skeleton ID and user login
for i, s in enumerate(skids):
for an in annotations[i]:
an.insert(1, s)
an.append(user_dict.get(an[4]))
# Now flatten the list of lists
annotations = [an for sublist in annotations for an in sublist]
# Create dataframe
df = pd.DataFrame(annotations,
columns=['annotation', 'skeleton_id', 'time_annotated',
'times_used', 'user_id', 'annotation_id',
'user'],
dtype=object
)
# Times used appears to not be working (always shows "1") - remove it
df.drop('times_used', inplace=True, axis=1)
df['time_annotated'] = [datetime.datetime.strptime(
d[:16], '%Y-%m-%dT%H:%M') for d in df['time_annotated'].values]
return df.sort_values('annotation').reset_index(drop=True)
@cache.undo_on_error
def get_annotations(x, remote_instance=None):
"""Retrieve annotations for a list of skeleton ids.
If a neuron has no annotations, it will not show up in returned dict!
Notes
-----
This API endpoint does not process more than 250 neurons at a time!
Parameters
----------
x
Neurons for which to retrieve annotations. Can be
either:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
dict
``{skeleton_id: [annotation, annotation], ...}``
See Also
--------
:func:`~pymaid.get_annotation_details`
Gives you more detailed information about annotations
of a set of neurons (includes timestamp and user) but
is slower.
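Examples
--------
Get annotations for two neurons (the skeleton IDs are placeholders):
>>> an = pymaid.get_annotations([16, 57003])
>>> list(an.values())[0]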
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
remote_get_annotations_url = remote_instance._get_annotations_for_skid_list()
get_annotations_postdata = {'metaannotations': 0, 'neuronnames': 0}
for i in range(len(x)):
key = 'skeleton_ids[%i]' % i
get_annotations_postdata[key] = str(x[i])
annotation_list_temp = remote_instance.fetch(remote_get_annotations_url,
post=get_annotations_postdata)
annotation_list = {}
try:
for skid in annotation_list_temp['skeletons']:
annotation_list[skid] = []
# for entry in annotation_list_temp['skeletons'][skid]:
for entry in annotation_list_temp['skeletons'][skid]['annotations']:
annotation_id = entry['id']
annotation_list[skid].append(
annotation_list_temp['annotations'][str(annotation_id)])
return(annotation_list)
except BaseException:
raise Exception(
'No annotations retrieved. Make sure that the skeleton IDs exist.')
@cache.wipe_and_retry
def get_annotation_id(annotations, allow_partial=False, raise_not_found=True,
remote_instance=None):
"""Retrieve the annotation ID for single or list of annotation(s).
Parameters
----------
annotations : str | list of str
Single annotations or list of multiple annotations.
allow_partial : bool, optional
If True, will allow partial matches.
raise_not_found : bool, optional
If True raise Exception if no match for any of the
query annotations is found. Else log warning.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
dict
``{'annotation_name': 'annotation_id', ...}``
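Examples
--------
Look up annotation IDs, allowing partial matches (the annotation name
below is a placeholder):
>>> ids = pymaid.get_annotation_id('glomerulus', allow_partial=True)
>>> ids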
"""
remote_instance = utils._eval_remote_instance(remote_instance)
logger.debug('Retrieving list of annotations...')
remote_annotation_list_url = remote_instance._get_annotation_list()
an_list = remote_instance.fetch(remote_annotation_list_url)
# Turn into pandas array
an_list = pd.DataFrame.from_records(an_list['annotations'])
annotations = utils._make_iterable(annotations)
annotation_ids = {}
for an in annotations:
# This is just to catch misunderstandings with parsing skeleton IDs
if an.startswith('annotation:'):
logger.warning('Removing unexpected "annotation:" prefix.')
an = an[11:]
# Strip whitespaces
an = an.strip()
# Strip tilde -> consider that people might use e.g. "~/VA6" for NOT
# VA6
if an.startswith('~'):
an = an[1:]
# '/' indicates regex
if an.startswith('/'):
re_str = an[1:]
# If allow partial just use the raw string
elif allow_partial:
re_str = an
# If exact match, encode this in regex
else:
re_str = '^{}$'.format(an)
# Search for matches
res = an_list[an_list.name.str.match(re_str)].set_index('name').id.to_dict()
if not res:
logger.warning('No annotation found for "{}"'.format(an))
annotation_ids.update(res)
if not annotation_ids:
if raise_not_found:
raise Exception('No matching annotation(s) found')
else:
logger.warning('No matching annotation(s) found')
return annotation_ids
@cache.undo_on_error
def find_nodes(tags=None, node_ids=None, skeleton_ids=None,
remote_instance=None):
"""Get nodes by tag (label), ID or associated skeleton.
Searches are intersected (logical AND) across parameters but additive
(logical OR) within each parameter (see examples).
Parameters
----------
tags : str | list of str
Use to restrict to nodes with given tags.
node_ids : int | list of int
Use to restrict to nodes with given IDs.
skeleton_ids : str | int | CatmaidNeuron/List, optional
Use to restrict to a set of neurons. Can be:
1. skeleton ID(s) (int or str)
2. neuron name(s) (str)
3. annotation(s): e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a node::
skeleton_id node_id parent_id x y z confidence ...
0
1
2
...
radius edition_time creator_id
0
1
2
See Also
--------
:func:`pymaid.get_connectors`
Function to get connectors by neurons and/or by tags.
Examples
--------
Get all nodes with a given tag
>>> tagged = pymaid.find_nodes(tags='SCHLEGEL_LH')
Get all nodes of a set of neurons with either of two tags
>>> tagged = pymaid.find_nodes(tags=['SCHLEGEL_LH', 'SCHLEGEL_AL'],
skeleton_ids='annotation:glomerulus DA1')
"""
remote_instance = utils._eval_remote_instance(remote_instance)
url = remote_instance._get_node_table_url()
if all([isinstance(x, type(None)) for x in [tags, skeleton_ids, node_ids]]):
answer = ""
while answer not in ["y", "n"]:
answer = input("Your search parameters will retrieve ALL "
"nodes in the dataset. Proceed? "
"[Y/N] ").lower()
if answer != 'y':
logger.info('Query cancelled')
return
post = {}
if not isinstance(tags, type(None)):
tags = utils._make_iterable(tags)
post.update({'label_names[{}]'.format(i): t for i, t in enumerate(tags)})
if not isinstance(node_ids, type(None)):
node_ids = utils._make_iterable(node_ids)
post.update({'treenode_ids[{}]'.format(i): t for i, t in enumerate(node_ids)})
if not isinstance(skeleton_ids, type(None)):
skeleton_ids = utils.eval_skids(skeleton_ids, remote_instance=remote_instance)
post.update({'skeleton_ids[{}]'.format(i): s for i, s in enumerate(skeleton_ids)})
# Fetch
resp = remote_instance.fetch(url, post=post)
# Format is [[ID, parent ID, x, y, z, confidence, radius, skeleton_id,
# edition_time, user_id], ...]
df = pd.DataFrame(resp,
columns=['node_id', 'parent_id', 'x', 'y', 'z', 'confidence',
'radius', 'skeleton_id', 'edition_time',
'creator_id'])
# Reorder and return
return df[['skeleton_id', 'node_id', 'parent_id', 'x', 'y', 'z',
'confidence', 'radius', 'edition_time', 'creator_id']]
@cache.undo_on_error
def has_soma(x, tag='soma', min_rad=500, return_ids=False,
remote_instance=None):
"""Check if neuron(s) has soma.
Parameters
----------
x
Neurons which to check for a soma. Can be either:
1. skeleton ID(s) (int or str)
2. neuron name(s) (str)
3. annotation(s): e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
tag : str | None, optional
Tag we expect the soma to have. Set to ``None`` if
not applicable.
min_rad : int, optional
Minimum radius of soma.
return_ids : bool, optional
If True, will return node IDs of soma(s) found
instead of simply if a soma has been found.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
dict
If ``return_ids=False``::
{skid1: True, skid2: False, ...}
If ``return_ids=True``::
{skid1: [node_id], skid2: [node_id], ...}
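Examples
--------
Find neurons in an annotated set that are missing a soma (the
annotation is a placeholder):
>>> soma = pymaid.has_soma('annotation:glomerulus DA1')
>>> missing = [s for s, has in soma.items() if not has]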
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
url = remote_instance._get_node_table_url()
post = {'label_names[0]': tag}
post.update({'skeleton_ids[{}]'.format(i): s for i, s in enumerate(x)})
# Fetch only nodes that have the soma label
resp = remote_instance.fetch(url, post=post)
# Format is [[ID, parent ID, x, y, z, confidence, radius, skeleton_id,
# edition_time, user_id], ...]
if return_ids is False:
by_skid = {int(s): False for s in x}
for e in resp:
by_skid[e[7]] = max(by_skid[e[7]], e[6] >= min_rad)
else:
by_skid = {int(s): [] for s in x}
for e in resp:
if e[6] >= min_rad:
by_skid[e[7]].append(e[0])
return by_skid
@cache.undo_on_error
def get_annotated(x, include_sub_annotations=False, raise_not_found=True,
allow_partial=False, remote_instance=None):
"""Retrieve entities (neurons + annotations) with given annotation(s).
This works similarly to CATMAID's neuron search widget: multiple annotations
are intersected! Includes meta-annotations.
Parameters
----------
x : str | list of str
(Meta-)annotations(s) to search for. Like
CATMAID's search widget, you can use regex to
search for names by starting the query with a
leading ``/``. Use a leading ``~`` (tilde) to
indicate ``NOT`` condition.
include_sub_annotations : bool, optional
If True, will include entities that have
annotations meta-annotated with ``x``. Does not
work on `NOT` search conditions.
allow_partial : bool, optional
If True, partially matching annotations are
searched too.
raise_not_found : bool, optional
If True raise Exception if no match for any of the
query annotations is found. Else log warning.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents an entity::
id name skeleton_ids type
0
1
2
...
See Also
--------
:func:`pymaid.find_neurons`
Use to retrieve neurons by combining various
search criteria. For example names, reviewers,
annotations, etc.
"""
remote_instance = utils._eval_remote_instance(remote_instance)
pos, neg = utils._eval_conditions(x)
post = {'with_annotations': False}
if pos:
pos_ids = get_annotation_id(pos, allow_partial=allow_partial,
raise_not_found=raise_not_found,
remote_instance=remote_instance)
post.update({'annotated_with[{}]'.format(i): n for i, n in enumerate(pos_ids.values())})
if include_sub_annotations:
post.update({'sub_annotated_with[{}]'.format(i): n for i, n in enumerate(pos_ids.values())})
if neg:
neg_ids = get_annotation_id(neg, allow_partial=allow_partial,
raise_not_found=raise_not_found,
remote_instance=remote_instance)
post.update({'not_annotated_with[{}]'.format(i): n for i, n in enumerate(neg_ids.values())})
logger.info('Searching for: {}'.format(','.join([str(s) for s in pos_ids])))
if neg:
logger.info('..... and NOT: {}'.format(','.join([str(s) for s in neg_ids])))
urls = remote_instance._get_annotated_url()
resp = remote_instance.fetch(urls, post=post, desc='Fetching')
return pd.DataFrame(resp['entities'])
@cache.undo_on_error
def get_skids_by_name(names, allow_partial=True, raise_not_found=True,
remote_instance=None):
"""Retrieve the all neurons with matching name.
Parameters
----------
names : str | list of str
Name(s) to search for. Like CATMAID's search widget,
you can use regex to search for names by starting
the query with a leading ``/``.
allow_partial : bool, optional
If True, partial matches are returned too.
raise_not_found : bool, optional
If True, will raise an exception if no matches for
given name(s) are found. Else will return an empty
DataFrame.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a neuron::
name skeleton_id
0
1
2
...
See Also
--------
:func:`pymaid.find_neurons`
Use to retrieve neurons by combining various
search criteria. For example names, reviewers,
annotations, etc.
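Examples
--------
Find neurons whose name starts with "PN" using a regex query (the name
pattern is a placeholder):
>>> pns = pymaid.get_skids_by_name('/^PN')
>>> pns.skeleton_id.values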
"""
remote_instance = utils._eval_remote_instance(remote_instance)
# Only look for unique names
names = list(set(utils._make_iterable(names, force_type=str)))
# Prepare names for regex search on the backend
post = []
for n in names:
post.append({'name': n,
'with_annotations': False,
'name_exact': True})
# If we allow partial matches or are using regex, set exact_name to False
if allow_partial or n.startswith('/'):
post[-1]['name_exact'] = False
urls = [remote_instance._get_annotated_url() for n in post]
responses = remote_instance.fetch(urls, post=post, desc='Fetching names')
neurons = [n for res in responses for n in res['entities'] if n['type'] == 'neuron']
df = pd.DataFrame([[n['name'], n['skeleton_ids'][0]] for n in neurons],
columns=['name', 'skeleton_id'])
if df.empty and raise_not_found:
raise Exception('No matching name(s) found')
return df.sort_values(['name']).drop_duplicates().reset_index(drop=True)
@cache.undo_on_error
def get_skids_by_annotation(annotations, allow_partial=False, intersect=False,
raise_not_found=True, remote_instance=None):
"""Retrieve the neurons annotated with given annotation(s).
Parameters
----------
annotations : str | list
Single annotation or list of multiple annotations.
Using a tilde (~) as prefix is interpreted as NOT.
allow_partial : bool, optional
If True, allow partial match of annotation.
intersect : bool, optional
If True, neurons must have ALL provided
annotations.
raise_not_found : bool, optional
If True raise Exception if no match for any of the
query annotations is found. Else log warning.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
list
``[skid1, skid2, skid3, ...]``
See Also
--------
:func:`pymaid.find_neurons`
Use to retrieve neurons by combining various
search criteria. For example names, reviewers,
annotations, etc.
:func:`pymaid.get_annotated`
Use to retrieve entities (neurons and annotations).
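Examples
--------
Get skeleton IDs for neurons with one annotation but without another
(the annotations are placeholders):
>>> skids = pymaid.get_skids_by_annotation(['glomerulus DA1', '~outdated'],
...                                        intersect=True)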
"""
remote_instance = utils._eval_remote_instance(remote_instance)
annotations = utils._make_iterable(annotations)
pos_an = [an for an in annotations if not an.startswith('~')]
neg_an = [an[1:] for an in annotations if an.startswith('~')]
# Placeholders in case we don't even ask for pos or neg
pos_ids = {}
neg_ids = {}
if pos_an:
pos_ids = get_annotation_id(pos_an,
raise_not_found=raise_not_found,
allow_partial=allow_partial,
remote_instance=remote_instance)
if neg_an:
neg_ids = get_annotation_id(neg_an,
raise_not_found=raise_not_found,
allow_partial=allow_partial,
remote_instance=remote_instance)
# Collapse for intersection...
if intersect:
annotation_post = [{'annotated_with[{}]'.format(i): v for i, v in enumerate(list(pos_ids.values()))}]
annotation_post[0].update({'not_annotated_with[{}]'.format(i): v for i, v in enumerate(list(neg_ids.values()))})
# ... or keep separate for no intersection
else:
annotation_post = [{'annotated_with': an} for an in pos_ids.values()]
annotation_post += [{'not_annotated_with': an} for an in neg_ids.values()]
# Need to clear empties
annotation_post = [p for p in annotation_post if p]
# Query server
remote_annotated_url = [remote_instance._get_annotated_url() for _ in annotation_post]
resp = remote_instance.fetch(remote_annotated_url, post=annotation_post)
# Extract skids from responses
annotated_skids = [e['skeleton_ids'][0] for r in resp for e in r['entities'] if e['type'] == 'neuron']
# Remove duplicates
annotated_skids = list(set(annotated_skids))
logger.debug('Found {} neurons with matching annotation(s)'.format(len(annotated_skids)))
return annotated_skids
@cache.undo_on_error
def neuron_exists(x, remote_instance=None):
"""Check if neurons exist in CATMAID.
Parameters
----------
x
Neurons to check if they exist in CATMAID. Can be:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
bool :
True if skeleton exists, False if not. If multiple
neurons are queried, returns a dict
``{skid1: True, skid2: False, ...}``
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
if len(x) > 1:
return {n: neuron_exists(n, remote_instance=remote_instance) for n in x}
else:
x = x[0]
remote_get_neuron_name = remote_instance._get_single_neuronname_url(x)
response = remote_instance.fetch(remote_get_neuron_name)
if 'error' in response:
return False
else:
return True
@cache.undo_on_error
def get_node_info(x, remote_instance=None):
"""Retrieve info for a set of nodes.
Parameters
----------
x : CatmaidNeuron | CatmaidNeuronList | list of node IDs
Single or list of node IDs. If CatmaidNeuron/List,
details for all its nodes are requested.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas DataFrame
DataFrame in which each row represents a queried node::
node_id neuron_name skeleton_id skeleton_name neuron_id
0
1
...
"""
remote_instance = utils._eval_remote_instance(remote_instance)
node_ids = utils.eval_node_ids(x, connectors=False, nodes=True)
urls = [remote_instance._get_single_node_info_url(tn) for tn in node_ids]
data = remote_instance.fetch(urls, desc='Get info')
df = pd.DataFrame([[node_ids[i]] + list(n.values()) for i, n in enumerate(data)],
columns=['node_id'] + list(data[0].keys())
)
return df
@cache.undo_on_error
def get_node_tags(node_ids, node_type, remote_instance=None):
"""Retrieve tags for a set of nodes OR connectors.
Parameters
----------
node_ids
Single or list of node or connector IDs.
node_type : 'NODE' | 'CONNECTOR'
Set which node type of IDs you have provided as they
use different API endpoints!
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
dict
dictionary containing tags for each node:
``{'nodeID': ['tag1', 'tag2', ...], 'nodeID' : [...], ...}``
Examples
--------
>>> pymaid.get_node_tags(['6626578', '6633237'],
... 'NODE',
... remote_instance)
{'6633237': ['ends'], '6626578': ['ends']}
See Also
--------
:func:`pymaid.add_tags`
Use to add tags to nodes.
:func:`pymaid.delete_tags`
Use to delete node tags.
"""
remote_instance = utils._eval_remote_instance(remote_instance)
if not isinstance(node_ids, (list, np.ndarray)):
node_ids = [node_ids]
# Make sure node_ids are strings
node_ids = [str(n) for n in node_ids]
url = remote_instance._get_node_labels_url()
if node_type in ['TREENODE', 'TREENODES', 'NODES', 'NODE']:
key = 'treenode_ids'
elif node_type in ['CONNECTOR', 'CONNECTORS']:
key = 'connector_ids'
else:
raise TypeError(f'Unknown node_type parameter: {node_type}')
POST = {key: ','.join([str(tn) for tn in node_ids])}
return remote_instance.fetch(url, post=POST)
@cache.undo_on_error
def get_segments(x, remote_instance=None):
"""Retrieve list of segments for a neuron just like the review widget.
Parameters
----------
x
Neurons to retrieve. Can be either:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
list
List of segments as lists of node IDs, ordered by length. If multiple
neurons are requested, returns a dict ``{skid: [segments], ...}``.
See Also
--------
``CatmaidNeuron.segments``
``CatmaidNeuron.short_segments``
Use these :class:`pymaid.CatmaidNeuron` attributes to access
segments generated by pymaid (faster).
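Examples
--------
Get review segments for a single neuron and check the length of the
first one (the skeleton ID is a placeholder):
>>> segs = pymaid.get_segments(16)
>>> len(segs[0])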
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
urls = []
post_data = []
for s in x:
urls.append(remote_instance._get_review_details_url(s))
# For some reason this needs to be fetched as POST (even though actual
# POST data is not necessary)
post_data.append({'placeholder': 0})
rdata = remote_instance.fetch(urls, post=post_data, desc='Get segs')
if len(x) > 1:
return {x[i]: [[tn['id'] for tn in arb['sequence']] for arb in rdata[i]] for i in range(len(x))}
else:
return [[tn['id'] for tn in arb['sequence']] for arb in rdata[0]]
@cache.undo_on_error
def get_review_details(x, remote_instance=None):
"""Retrieve review status (reviewer + timestamp) by node for given neuron.
Parameters
----------
x
Neurons to get review-details for. Can be either:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas DataFrame
DataFrame in which each row represents a node::
node_id skeleton_id reviewer1 reviewer2 reviewer3
0 12345 12345123 datetime NaT datetime
1
...
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
node_list = []
urls = []
post_data = []
for s in x:
urls.append(remote_instance._get_review_details_url(s))
# For some reason this needs to be fetched as POST (even though actual
# POST data is not necessary)
post_data.append({'placeholder': 0})
rdata = remote_instance.fetch(urls,
post=post_data,
desc='Get rev stats')
for i, neuron in enumerate(rdata):
# There is a small chance that nodes are counted twice but not
# tracking node_id speeds up this extraction a LOT
# node_ids = []
for arbor in neuron:
node_list += [(n['id'], x[i], n['rids'])
for n in arbor['sequence'] if n['rids']]
tn_to_skid = {n[0]: n[1] for n in node_list}
node_dict = {n[0]: {u[0]: datetime.datetime.strptime(
u[1][:16], '%Y-%m-%dT%H:%M') for u in n[2]} for n in node_list}
user_list = get_user_list(remote_instance=remote_instance)
user_dict = user_list.set_index('id').login.to_dict()
df = pd.DataFrame.from_dict(node_dict, orient='index').fillna(np.nan)
df.columns = df.columns.map(user_dict)
df['skeleton_id'] = [tn_to_skid[tn] for tn in df.index.values]
df.index.name = 'node_id'
df = df.reset_index(drop=False)
# Make sure we didn't count nodes twice
df = df[~df.duplicated('node_id')]
return df
@cache.undo_on_error
def get_logs(operations=[], entries=50, display_start=0, search="",
remote_instance=None):
"""Retrieve logs (same data as in log widget).
Parameters
----------
operations : list of str, optional
If empty, all operations will be queried from the server.
Possible operations: 'join_skeleton',
'change_confidence', 'rename_neuron', 'create_neuron',
'create_skeleton', 'remove_neuron', 'split_skeleton',
'reroot_skeleton', 'reset_reviews', 'move_skeleton'
entries : int, optional
Number of entries to retrieve.
display_start : int, optional
Sets range of entries to return:
``display_start`` to ``display_start + entries``.
search : str, optional
Use to filter results for e.g. a specific skeleton ID
or neuron name.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a single operation::
user operation timestamp x y z explanation
0
1
...
"""
remote_instance = utils._eval_remote_instance(remote_instance)
if not operations:
operations = [-1]
elif not isinstance(operations, (list, np.ndarray)):
operations = [operations]
logs = []
for op in operations:
get_logs_postdata = {'sEcho': 6,
'iColumns': 7,
'iDisplayStart': display_start,
'iDisplayLength': entries,
'mDataProp_0': 0,
'sSearch_0': '',
'bRegex_0': False,
'bSearchable_0': False,
'bSortable_0': True,
'mDataProp_1': 1,
'sSearch_1': '',
'bRegex_1': False,
'bSearchable_1': False,
'bSortable_1': True,
'mDataProp_2': 2,
'sSearch_2': '',
'bRegex_2': False,
'bSearchable_2': False,
'bSortable_2': True,
'mDataProp_3': 3,
'sSearch_3': '',
'bRegex_3': False,
'bSearchable_3': False,
'bSortable_3': False,
'mDataProp_4': 4,
'sSearch_4': '',
'bRegex_4': False,
'bSearchable_4': False,
'bSortable_4': False,
'mDataProp_5': 5,
'sSearch_5': '',
'bRegex_5': False,
'bSearchable_5': False,
'bSortable_5': False,
'mDataProp_6': 6,
'sSearch_6': '',
'bRegex_6': False,
'bSearchable_6': False,
'bSortable_6': False,
'sSearch': '',
'bRegex': False,
'iSortCol_0': 2,
'sSortDir_0': 'desc',
'iSortingCols': 1,
'self.project_id': remote_instance.project_id,
'operation_type': op,
'search_freetext': search}
remote_get_logs_url = remote_instance._get_logs_url()
logs += remote_instance.fetch(remote_get_logs_url,
post=get_logs_postdata)['aaData']
df = pd.DataFrame(logs,
columns=['user', 'operation', 'timestamp',
'x', 'y', 'z', 'explanation']
)
df['timestamp'] = [datetime.datetime.strptime(
d[:16], '%Y-%m-%dT%H:%M') for d in df['timestamp'].values]
return df
@cache.undo_on_error
def get_contributor_statistics(x, separate=False, max_threads=500,
remote_instance=None):
"""Retrieve contributor statistics for given skeleton ids.
By default, stats are given over all neurons.
Parameters
----------
x
Neurons to get contributor stats for. Can be either:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation: e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
separate : bool, optional
If True, stats are given per neuron.
max_threads : int, optional
Maximum parallel data requests. Overrides
``CatmaidInstance.max_threads``.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame or pandas.Series
Series, if ``separate=False``. DataFrame, if ``separate=True``::
skeleton_id node_contributors multiuser_review_minutes ..
1
2
3
post_contributors construction_minutes min_review_minutes ..
1
2
3
n_postsynapses n_presynapses pre_contributors n_nodes ..
1
2
3
review_contributors
1
2
3
Examples
--------
>>> # Plot contributions as pie chart
>>> import matplotlib.pyplot as plt
>>> cont = pymaid.get_contributor_statistics("annotation:uPN right")
>>> plt.subplot(131, aspect=1)
>>> ax1 = plt.pie(cont.node_contributors.values(),
... labels=cont.node_contributors.keys(),
... autopct='%.0f%%' )
>>> plt.subplot(132, aspect=1)
>>> ax2 = plt.pie(cont.pre_contributors.values(),
... labels=cont.pre_contributors.keys(),
... autopct='%.0f%%' )
>>> plt.subplot(133, aspect=1)
>>> ax3 = plt.pie(cont.post_contributors.values(),
... labels=cont.post_contributors.keys(),
... autopct='%.0f%%' )
>>> plt.show()
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_skids(x, remote_instance=remote_instance)
columns = ['skeleton_id', 'n_nodes', 'node_contributors', 'n_presynapses',
'pre_contributors', 'n_postsynapses', 'post_contributors',
'review_contributors', 'multiuser_review_minutes',
'construction_minutes', 'min_review_minutes']
user_list = get_user_list(remote_instance=remote_instance)
user_dict = user_list.set_index('id').login.to_dict()
if not separate:
with config.tqdm(total=len(x), desc='Contr. stats',
disable=config.pbar_hide,
leave=config.pbar_leave) as pbar:
stats = []
for j in range(0, len(x), max_threads):
pbar.update(j)
get_statistics_postdata = {}
for i in range(j, min(len(x), j + max_threads)):
key = 'skids[%i]' % i
get_statistics_postdata[key] = x[i]
remote_get_statistics_url = remote_instance._get_contributions_url()
stats.append(remote_instance.fetch(remote_get_statistics_url,
post=get_statistics_postdata))
# Now generate DataFrame
node_contributors = {user_dict.get(int(u)): sum([st['node_contributors'][u] for st in stats if u in st[
'node_contributors']]) for st in stats for u in st['node_contributors']}
pre_contributors = {user_dict.get(int(u)): sum([st['pre_contributors'][u] for st in stats if u in st[
'pre_contributors']]) for st in stats for u in st['pre_contributors']}
post_contributors = {user_dict.get(int(u)): sum([st['post_contributors'][u] for st in stats if u in st[
'post_contributors']]) for st in stats for u in st['post_contributors']}
review_contributors = {user_dict.get(int(u)): sum([st['review_contributors'][u] for st in stats if u in st[
'review_contributors']]) for st in stats for u in st['review_contributors']}
df = pd.Series([
x,
sum([st['n_nodes'] for st in stats]),
node_contributors,
sum([st['n_pre'] for st in stats]),
pre_contributors,
sum([st['n_post'] for st in stats]),
post_contributors,
review_contributors,
sum([st['multiuser_review_minutes'] for st in stats]),
sum([st['construction_minutes'] for st in stats]),
sum([st['min_review_minutes'] for st in stats])
],
index=columns,
dtype=object
)
else:
get_statistics_postdata = [{'skids[0]': s} for s in x]
remote_get_statistics_url = [
remote_instance._get_contributions_url() for s in x]
stats = remote_instance.fetch(remote_get_statistics_url,
post=get_statistics_postdata,
desc='Get contrib.')
df = pd.DataFrame([[
s,
stats[i]['n_nodes'],
{user_dict.get(int(u)): stats[i]['node_contributors'][u]
for u in stats[i]['node_contributors']},
stats[i]['n_pre'],
{user_dict.get(int(u)): stats[i]['pre_contributors'][u]
for u in stats[i]['pre_contributors']},
stats[i]['n_post'],
{user_dict.get(int(u)): stats[i]['post_contributors'][u]
for u in stats[i]['post_contributors']},
{user_dict.get(int(u)): stats[i]['review_contributors'][u]
for u in stats[i]['review_contributors']},
stats[i]['multiuser_review_minutes'],
stats[i]['construction_minutes'],
stats[i]['min_review_minutes']
] for i, s in enumerate(x)],
columns=columns,
dtype=object
)
return df
@cache.undo_on_error
def get_history(start_date=(datetime.date.today() - datetime.timedelta(days=7)).isoformat(),
end_date=datetime.date.today().isoformat(), split=True,
remote_instance=None):
"""Retrieves CATMAID project history.
If the time window is too large, the connection might time out which will
result in an error! Make sure ``split=True`` to avoid that.
Parameters
----------
start_date : datetime | str | tuple, optional, default=last week
dates can be either:
- ``datetime.date``
- ``datetime.datetime``
- str ``'YYYY-MM-DD'``, e.g. ``'2016-03-09'``
- tuple ``(YYYY, MM, DD)``, e.g. ``(2016, 3, 9)``
end_date : datetime | str | tuple, optional, default=today
See start_date.
split : bool, optional
If True, history will be requested in bouts of 6 months.
Useful for very big time windows, which can otherwise
lead to gateway timeouts.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas.Series
A pandas.Series with the following entries::
{
cable : DataFrame containing cable created in nm.
Rows = users, columns = dates
connector_links : DataFrame containing connector links created.
Rows = users, columns = dates
reviewed : DataFrame containing nodes reviewed.
Rows = users, columns = dates
user_details : user-list (see pymaid.get_user_list())
nodes : DataFrame containing nodes created by user.
}
Examples
--------
>>> import matplotlib.pyplot as plt
>>> # Get last week's history (the default time window)
>>> hist = pymaid.get_history()
>>> # Plot cable created by all users over time
>>> hist.cable.T.plot()
>>> plt.show()
>>> # Collapse users and plot sum of cable over time
>>> hist.cable.sum(0).plot()
>>> plt.show()
>>> # Plot single users cable (index by user login name)
>>> hist.cable.loc['schlegelp'].T.plot()
>>> plt.show()
>>> # Sum up cable created this week by all users
>>> hist.cable.values.sum()
>>> # Get number of active (non-zero) users
>>> active_users = hist.cable.astype(bool).sum(axis=0)
See Also
--------
:func:`~pymaid.get_user_stats`
Returns a summary of user stats as table.
:func:`~pymaid.plot_history`
Quick way to plot history over time.
"""
def _constructor_helper(data, key, days):
""" Helper to extract variable from data returned by CATMAID server
"""
temp = []
for d in days:
try:
temp.append(data[d][key])
except BaseException:
temp.append(0)
return temp
remote_instance = utils._eval_remote_instance(remote_instance)
if isinstance(start_date, datetime.date):
start_date = start_date.isoformat()
elif isinstance(start_date, datetime.datetime):
start_date = start_date.isoformat()[:10]
elif isinstance(start_date, (tuple, list)):
start_date = datetime.date(start_date[0], start_date[
1], start_date[2]).isoformat()
if isinstance(end_date, datetime.date):
end_date = end_date.isoformat()
elif isinstance(end_date, datetime.datetime):
end_date = end_date.isoformat()[:10]
elif isinstance(end_date, (tuple, list)):
end_date = datetime.date(end_date[0], end_date[
1], end_date[2]).isoformat()
rounds = []
if split:
start = datetime.datetime.strptime(start_date, "%Y-%m-%d").date()
end = datetime.datetime.strptime(end_date, "%Y-%m-%d").date()
logger.info(
'Retrieving %i days of history in bouts!' % (end - start).days)
# First make big bouts of roughly 6 months each
while start < (end - datetime.timedelta(days=6 * 30)):
rounds.append((start.isoformat(),
(start + datetime.timedelta(days=6 * 30)).isoformat()))
start += datetime.timedelta(days=6 * 30)
# Append the last bit
if start < end:
rounds.append((start.isoformat(), end.isoformat()))
else:
rounds = [(start_date, end_date)]
data = []
for r in config.tqdm(rounds, desc='Retrieving history',
disable=config.pbar_hide, leave=config.pbar_leave):
get_history_GET_data = {'pid': remote_instance.project_id,
'start_date': r[0],
'end_date': r[1]
}
remote_get_history_url = remote_instance._get_history_url()
remote_get_history_url += f'?{urllib.parse.urlencode(get_history_GET_data)}'
logger.debug(f'Retrieving user history from {r[0]} to {r[1]}.')
data.append(remote_instance.fetch(remote_get_history_url))
# Now merge data into a single dict
stats = dict(data[0])
for d in data:
stats['days'] += [e for e in d['days'] if e not in stats['days']]
stats['daysformatted'] += [e for e in d['daysformatted']
if e not in stats['daysformatted']]
for u in d['stats_table']:
stats['stats_table'][u].update(d['stats_table'][u])
user_list = get_user_list(remote_instance=remote_instance).set_index('id')
user_list.index = user_list.index.astype(str)
user_dict = user_list.login.to_dict()
df = pd.Series([
pd.DataFrame([_constructor_helper(stats['stats_table'][u], 'new_cable_length', stats['days']) for u in stats['stats_table']],
index=[user_dict.get(u, f'Anonymous{i}') for i, u in enumerate(stats['stats_table'].keys())],
columns=pd.to_datetime([datetime.datetime.strptime(d, '%Y%m%d').date() for d in stats['days']])),
pd.DataFrame([_constructor_helper(stats['stats_table'][u], 'new_treenodes', stats['days']) for u in stats['stats_table']],
index=[user_dict.get(u, f'Anonymous{i}') for i, u in enumerate(stats['stats_table'].keys())],
columns=pd.to_datetime([datetime.datetime.strptime(d, '%Y%m%d').date() for d in stats['days']])),
pd.DataFrame([_constructor_helper(stats['stats_table'][u], 'new_connectors', stats['days']) for u in stats['stats_table']],
index=[user_dict.get(u, f'Anonymous{i}') for i, u in enumerate(stats['stats_table'].keys())],
columns=pd.to_datetime([datetime.datetime.strptime(d, '%Y%m%d').date() for d in stats['days']])),
pd.DataFrame([_constructor_helper(stats['stats_table'][u], 'new_reviewed_nodes', stats['days']) for u in stats['stats_table']],
index=[user_dict.get(u, f'Anonymous{i}') for i, u in enumerate(stats['stats_table'].keys())],
columns=pd.to_datetime([datetime.datetime.strptime(d, '%Y%m%d').date() for d in stats['days']])),
user_list.reset_index(drop=True)
],
index=['cable', 'nodes', 'connector_links',
'reviewed', 'user_details']
)
return df
@cache.undo_on_error
def get_nodes_in_volume(*x, coord_format='NM', resolution=(4, 4, 50),
remote_instance=None):
"""Retrieve nodes and connectors in given bounding box.
Please note that there is a cap on the number of nodes returned that is
hard wired into the CATMAID server's settings.
Parameters
----------
*x
Coordinates defining the bounding box. Can be
either:
- 1d list of coordinates: left, right, top, bottom, z1, z2
- 2d list of coordinates: [[left, right], [top, bottom], [z1, z2]]
- pymaid.Volume
Can be given in nm or pixels.
coord_format : str, optional
Define whether provided coordinates are in
nanometer ('NM') or in pixels/slices ('PIXEL').
resolution : tuple of floats, optional
x/y/z resolution in nm [default = (4, 4, 50)]
Used to transform to nm if limits are given in
pixels.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
nodes : pandas.DataFrame
DataFrame in which each row is a node::
node_id parent_id x y z confidence radius skeleton_id edition_time user_id
0
1
2
connectors : pandas.DataFrame
DataFrame in which each row is a connector::
connector_id x y z confidence edition_time user_id partners
0
1
2
``partners`` are lists of::
[node_id, relation_id, link_confidence, link_edition_time, link_id]
truncated : bool
If True, lists are truncated because the node limit was reached.
relation_map : dict
Map for ``relation_id`` in connector's ``partner`` column.
Examples
--------
Get (truncated) lists of nodes and connectors in the bounding box of the AL:
>>> al = pymaid.get_volume('AL_R')
>>> nodes, connectors, truncated, relation_map = pymaid.get_nodes_in_volume(al)
>>> truncated
True
"""
remote_instance = utils._eval_remote_instance(remote_instance)
if isinstance(x[0], ns.Volume):
x = x[0].bbox
# Flatten the list of coordinates
coords = np.array(x).flatten()
if coords.shape[0] != 6:
raise ValueError('Must provide 6 coordinates (left, right, top, '
'bottom, z1, z2), got {}'.format(coords.shape[0]))
# Extract coords
left, right, top, bottom, z1, z2 = coords
# Set resolution to 1:1 if coordinates are already in nm
if coord_format == 'NM':
resolution = (1, 1, 1)
remote_nodes_list = remote_instance._get_node_list_url()
node_list_postdata = {
'left': left * resolution[0],
'right': right * resolution[0],
'top': top * resolution[1],
'bottom': bottom * resolution[1],
'z1': z1 * resolution[2],
'z2': z2 * resolution[2],
# Atnid seems to be related to fetching the active node too
# -> will be ignored if atnid = -1
'atnid': -1,
'labels': False,
# 'limit': 3500, # this doesn't do anything -> hard wired into server settings
}
node_data = remote_instance.fetch(remote_nodes_list,
post=node_list_postdata)
tn = pd.DataFrame(node_data[0],
columns=['node_id', 'parent_id',
'x', 'y', 'z', 'confidence',
'radius', 'skeleton_id',
'edition_time', 'user_id'])
# Fix parent ID
tn.loc[tn.parent_id.isnull(), 'parent_id'] = -1
tn['parent_id'] = tn.parent_id.astype(int).astype(object)
tn.loc[tn.parent_id < 0, 'parent_id'] = None
tn['edition_time'] = pd.to_datetime(tn.edition_time, unit='s', utc=True)
cn = pd.DataFrame(node_data[1],
columns=['connector_id', 'x', 'y', 'z',
'confidence', 'edition_time',
'user_id', 'partners'])
cn['edition_time'] = pd.to_datetime(cn.edition_time, unit='s', utc=True)
node_limit_reached = node_data[3]
relation_map = node_data[4]
return tn, cn, node_limit_reached, relation_map
@cache.undo_on_error
def find_neurons(names=None, annotations=None, volumes=None, users=None,
from_date=None, to_date=None, reviewed_by=None, skids=None,
intersect=False, partial_match=False, only_soma=False,
min_size=1, minimum_cont=None, remote_instance=None):
"""Find neurons matching given search criteria.
Warning
-------
Depending on the parameters, this can take quite a while! Also: by default,
will return single-node neurons! Use the ``min_size`` parameter to change
that behaviour.
Parameters
----------
names : str | list of str
Neuron name(s) to search for.
annotations : str | list of str
Annotation(s) to search for.
volumes : str | navis.Volume | list of either
CATMAID volume(s) to look into. This uses
:func:`~pymaid.get_neurons_in_volumes` and will look
for neurons within the **bounding box** of given
volume(s).
users : int | str | list of either, optional
User ID(s) (int) or login(s) (str).
reviewed_by : int | str | list of either, optional
User ID(s) (int) or login(s) (str) of reviewer.
from_date : datetime | list of integers, optional
Format: [year, month, day]. Return neurons created
after this date. This works ONLY if also querying by
``users`` or ``reviewed_by``!
to_date : datetime | list of integers, optional
Format: [year, month, day]. Return neurons created
before this date. This works ONLY if also querying by
``users`` or ``reviewed_by``!
skids : list of skids, optional
Can be a list of skids, a CatmaidNeuronList or pandas
DataFrame with "skeleton_id" column.
intersect : bool, optional
If multiple search criteria are provided, neurons have
to meet all of them in order to be returned. This
is first applied WITHIN search criteria (works for
multiple ``annotations``, ``volumes``, ``users`` and
``reviewed_by``) and then ACROSS criteria!
partial_match : bool, optional
If True, partial matches for *names* AND *annotations*
are allowed.
minimum_cont : int, optional
If looking for specific ``users``: minimum contribution
(in nodes) to a neuron in order for it to be counted.
Only applicable if ``users`` is provided. If multiple
users are provided contribution is calculated across
all users. Minimum contribution does NOT take start
and end dates into account! This is applied AFTER
intersecting!
min_size : int, optional
Minimum size (in nodes) for neurons to be returned.
The lower this value, the longer it will take to
filter.
only_soma : bool, optional
If True, only neurons with a soma are returned.
remote_instance : CatmaidInstance
If not passed directly, will try using globally
defined CatmaidInstance.
Returns
-------
:class:`~pymaid.CatmaidNeuronList`
Examples
--------
>>> # Simple request for neurons with given annotations
>>> to_find = ['glomerulus DA1', 'glomerulus DL4']
>>> skids = pymaid.find_neurons(annotations=to_find)
>>> # Get only neurons that have both annotations
>>> skids = pymaid.find_neurons(annotations=to_find, intersect=True)
>>> # Get all neurons with more than 1000 nodes
>>> skids = pymaid.find_neurons(min_size=1000)
>>> # Get all neurons that have been traced recently by given user
>>> skids = pymaid.find_neurons(users='schlegelp',
... from_date=[2017, 10, 1])
>>> # Get all neurons traced by a given user within a certain volume
>>> skids = pymaid.find_neurons(users='schlegelp',
... minimum_cont=1000,
... volumes='LH_R')
"""
remote_instance = utils._eval_remote_instance(remote_instance)
# First, we have to prepare a whole lot of parameters
if users:
users = utils.eval_user_ids(users, remote_instance=remote_instance)
if reviewed_by:
reviewed_by = utils.eval_user_ids(
reviewed_by, remote_instance=remote_instance)
if annotations and not isinstance(annotations, (list, np.ndarray)):
annotations = [annotations]
if names and not isinstance(names, (list, np.ndarray)):
names = [names]
if volumes and not isinstance(volumes, (list, np.ndarray)):
volumes = [volumes]
# Bring dates into the correct format
if from_date and not to_date:
today = datetime.date.today()
to_date = (today.year, today.month, today.day)
elif to_date and not from_date:
from_date = (1900, 1, 1)
if isinstance(from_date, datetime.date):
from_date = [from_date.year, from_date.month, from_date.day]
if isinstance(to_date, datetime.date):
to_date = [to_date.year, to_date.month, to_date.day]
# Warn if from/to_date are used without also querying by user or reviewer
if from_date and not (users or reviewed_by):
logger.warning('Start/End dates can only be used for queries against '
'<users> or <reviewed_by>')
# Now go over all parameters and get sets of skids
sets_of_skids = []
if not isinstance(skids, type(None)):
skids = utils.eval_skids(skids, remote_instance=remote_instance)
sets_of_skids.append(set(skids))
# Get skids by name
if names:
urls = [remote_instance._get_annotated_url() for n in names]
post_data = [{'name': str(n),
'with_annotations': False,
'name_exact': not partial_match}
for n in names]
results = remote_instance.fetch(urls,
post=post_data,
desc='Get names')
this_name = []
for i, r in enumerate(results):
for e in r['entities']:
if partial_match and e['type'] == 'neuron' and names[i].lower() in e['name'].lower():
this_name.append(e['skeleton_ids'][0])
if not partial_match and e['type'] == 'neuron' and e['name'] == names[i]:
this_name.append(e['skeleton_ids'][0])
sets_of_skids.append(set(this_name))
# Get skids by annotation
if annotations:
annotation_ids = get_annotation_id(annotations,
allow_partial=partial_match,
remote_instance=remote_instance)
if not annotation_ids:
raise Exception('No matching annotation(s) found!')
if partial_match is True:
logger.debug('Found {0} id(s) (partial matches '
'included)'.format(len(annotation_ids)))
else:
logger.debug('Found id(s): %s | Unable to retrieve: %i' % (
str(annotation_ids), len(annotations) - len(annotation_ids)))
urls = [remote_instance._get_annotated_url() for an in annotation_ids]
post_data = [{'annotated_with': str(an), 'with_annotations': 'false'}
for an in annotation_ids.values()]
results = remote_instance.fetch(urls,
post=post_data,
desc='Get annot')
annotated = [set([e['skeleton_ids'][0] for e in res['entities'] if e['type'] == 'neuron']) for res in results]
# Intersect within search criteria if applicable
if intersect:
sets_of_skids.append(set.intersection(*annotated))
else:
sets_of_skids.append(set.union(*annotated))
# Get skids by user
if users:
urls = [remote_instance._get_list_skeletons_url() for u in users]
GET_data = [{'nodecount_gt': min_size - 1,
'created_by': u} for u in users]
if from_date and to_date:
dates = {'from': ''.join(['{0:02d}'.format(d) for d in from_date]),
'to': ''.join(['{0:02d}'.format(d) for d in to_date])}
GET_data = [{**d, **dates} for d in GET_data]
urls = [u + '?%s' % urllib.parse.urlencode(g) for u, g in zip(urls, GET_data)]
results = remote_instance.fetch(urls, desc='Get users')
# Intersect within search criteria if applicable
if intersect:
sets_of_skids.append(set.intersection(*[set(res) for res in results]))
else:
sets_of_skids.append(set.union(*[set(res) for res in results]))
# Get skids by reviewer
if reviewed_by:
urls = [remote_instance._get_list_skeletons_url() for u in reviewed_by]
GET_data = [{'nodecount_gt': min_size - 1,
'reviewed_by': u} for u in reviewed_by]
if from_date and to_date:
dates = {'from': ''.join(['{0:02d}'.format(d) for d in from_date]),
'to': ''.join(['{0:02d}'.format(d) for d in to_date])}
GET_data = [{**d, **dates} for d in GET_data]
urls = [u + '?%s' % urllib.parse.urlencode(g) for u, g in zip(urls, GET_data)]
results = remote_instance.fetch(urls, desc='Get reviewers')
# Intersect within search criteria if applicable
if intersect:
sets_of_skids.append(set.intersection(*[set(res) for res in results]))
else:
sets_of_skids.append(set.union(*[set(res) for res in results]))
# Get by volume
if volumes:
temp = []
for v in config.tqdm(volumes, desc='Get by vols',
disable=config.pbar_hide,
leave=config.pbar_leave):
if not isinstance(v, ns.Volume):
vol = get_volume(v, remote_instance)
else:
vol = v
temp.append(set(get_neurons_in_bbox(vol.bbox,
remote_instance=remote_instance)))
# Intersect within search criteria if applicable
if intersect:
sets_of_skids.append(set.intersection(*temp))
else:
sets_of_skids.append(set.union(*temp))
# Get neurons by size if only min_size and no other parameters were
# provided
if False not in [isinstance(param, type(None)) for param in [names,
annotations,
volumes,
users,
reviewed_by,
skids]]:
# Make sure people don't accidentally request ALL neurons in the
# dataset
if min_size <= 1:
answer = ""
while answer not in ["y", "n"]:
answer = input("Your search parameters will retrieve ALL "
"neurons in the dataset. Proceed? "
"[Y/N] ").lower()
if answer != 'y':
logger.info('Query cancelled')
return
logger.info(
'Get all neurons with >= {0} nodes'.format(min_size))
get_skeleton_list_GET_data = {'nodecount_gt': min_size - 1}
remote_get_list_url = remote_instance._get_list_skeletons_url()
remote_get_list_url += '?%s' % urllib.parse.urlencode(
get_skeleton_list_GET_data)
these_neurons = set(remote_instance.fetch(remote_get_list_url))
sets_of_skids.append(these_neurons)
# Now intersect/merge ACROSS search criteria
if intersect:
logger.info('Intersecting by search parameters')
skids = list(set.intersection(*sets_of_skids))
else:
skids = list(set.union(*sets_of_skids))
# Filtering by size was already done for users and reviewed_by and dates
# If we queried by annotations, names or volumes we need to do this
# explicitly here
if min_size > 1 and (volumes or annotations or names):
logger.info('Filtering neurons for size')
get_skeleton_list_GET_data = {'nodecount_gt': min_size - 1}
remote_get_list_url = remote_instance._get_list_skeletons_url()
remote_get_list_url += '?%s' % urllib.parse.urlencode(
get_skeleton_list_GET_data)
neurons_by_size = set(remote_instance.fetch(remote_get_list_url))
skids = set.intersection(set(skids), neurons_by_size)
nl = core.CatmaidNeuronList(list(skids), remote_instance=remote_instance)
if only_soma:
hs = has_soma(nl, return_ids=False, remote_instance=remote_instance)
nl = core.CatmaidNeuronList([n for n in nl if hs[int(n.skeleton_id)]])
if users and minimum_cont:
nl.get_skeletons(skip_existing=True)
nl = core.CatmaidNeuronList([n for n in nl if n.nodes[n.nodes.creator_id.isin(users)].shape[0] >= minimum_cont],
remote_instance=remote_instance)
if nl.empty:
logger.warning(
'No neurons matching the search parameters were found')
else:
logger.info(f'Found {len(nl)} neurons matching the search parameters')
nl.get_names()
return nl
@cache.undo_on_error
def get_neurons_in_volume(volumes, min_nodes=2, min_cable=1, intersect=False,
only_soma=False, remote_instance=None):
"""Retrieves neurons with processes within CATMAID volumes.
This function uses the **BOUNDING BOX** around volume as proxy and queries
for neurons that are within that volume. See examples on how to work
around this.
Warning
-------
Depending on the number of nodes in that volume, this can take quite a
while! Also: by default, will NOT return single-node neurons - use the
``min_nodes`` parameter to change that behaviour.
Parameters
----------
volumes : str | navis.Volume | list of either
Single or list of CATMAID volumes.
min_nodes : int, optional
Minimum node count for a neuron within given
volume(s).
min_cable : int, optional
Minimum cable length [nm] for a neuron within
given volume(s).
intersect : bool, optional
If multiple volumes are provided, this parameter
determines if neurons have to be in all of the
volumes or just a single one.
only_soma : bool, optional
If True, only neurons with a soma will be returned.
remote_instance : CatmaidInstance
If not passed directly, will try using global.
Returns
-------
list
``[skeleton_id, skeleton_id, ...]``
See Also
--------
:func:`~pymaid.get_partners_in_volume`
Get only partners that make connections within a
given volume.
:func:`pymaid.find_neurons`
Use to retrieve neurons by combining various
search criteria. For example names, reviewers,
annotations, etc.
Examples
--------
>>> # Get a volume
>>> lh = pymaid.get_volume('LH_R')
>>> # Get neurons within the bounding box of a volume
>>> skids = pymaid.get_neurons_in_volume(lh, min_nodes=10)
>>> # Retrieve 3D skeletons of these neurons
>>> lh_neurons = pymaid.get_neurons(skids)
>>> # Prune by volume
>>> lh_pruned = lh_neurons.copy()
>>> lh_pruned.prune_by_volume(lh)
>>> # Filter neurons with more than 100um of cable in the volume
>>> n = lh_neurons[lh_pruned.cable_length > 100]
"""
remote_instance = utils._eval_remote_instance(remote_instance)
if not isinstance(volumes, (list, np.ndarray)):
volumes = [volumes]
for i, v in enumerate(volumes):
if not isinstance(v, ns.Volume):
volumes[i] = get_volume(v)
neurons = []
for v in volumes:
logger.info('Retrieving neurons in volume {0}'.format(v.name))
temp = get_neurons_in_bbox(v, min_nodes=min_nodes,
min_cable=min_cable,
remote_instance=remote_instance)
if not intersect:
neurons += list(temp)
else:
neurons += [temp]
if intersect:
# Filter for neurons that show up in all neuropils
neurons = [n for l in neurons for n in l if False not in [n in v for v in neurons]]
# Need to do this in case we have several volumes
neurons = list(set(neurons))
if only_soma:
soma = has_soma(neurons, remote_instance=remote_instance)
neurons = [n for n in neurons if soma[n] is True]
logger.info('Done. {0} unique neurons found in volume(s) '
'{1}'.format(len(neurons),
','.join([v.name for v in volumes])))
return neurons
@cache.undo_on_error
def get_neurons_in_bbox(bbox, unit='NM', min_nodes=1, min_cable=1,
remote_instance=None, **kwargs):
"""Retrieve neurons with processes within a defined box volume.
Parameters
----------
bbox : list-like | dict | navis.Volume
Coordinates of the bounding box. Can be either:
1. List/np.array: ``[[left, right], [top, bottom], [z1, z2]]``
2. Dictionary ``{'left': int|float, 'right': ..., ...}``
unit : 'NM' | 'PIXEL'
Unit of your coordinates. Attention:
'PIXEL' will also assume that Z1/Z2 is in slices.
By default, xyz resolution of 4x4x40nm per pixel
is assumed. Pass e.g. ``res=[8, 8, 40]`` as keyword
argument to override this.
min_nodes : int, optional
Minimum node count for a neuron within given
bounding box.
min_cable : int, optional
Minimum cable length [nm] for a neuron within
given bounding box.
remote_instance : CatmaidInstance
If not passed directly, will try using global.
Returns
-------
list
``[skeleton_id, skeleton_id, ...]``
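Examples
--------
A minimal sketch (assumes a globally defined CatmaidInstance; the
coordinates below are illustrative and in nm):
>>> bbox = [[120000, 130000], [50000, 60000], [150000, 160000]]
>>> skids = pymaid.get_neurons_in_bbox(bbox, min_nodes=10)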
"""
remote_instance = utils._eval_remote_instance(remote_instance)
if isinstance(bbox, ns.Volume):
bbox = bbox.bbox
if isinstance(bbox, dict):
bbox = np.array([[bbox['left'], bbox['right']],
[bbox['top'], bbox['bottom']],
[bbox['z1'], bbox['z2']]
])
bbox = np.asarray(bbox)
if bbox.shape == (2, 3):
bbox = bbox.T
if unit == 'PIXEL':
res = np.asarray(kwargs.get('res', [4, 4, 40])).reshape(3, 1)
bbox *= res
url = remote_instance._get_skeletons_in_bbox(minx=min(bbox[0]),
maxx=max(bbox[0]),
miny=min(bbox[1]),
maxy=max(bbox[1]),
minz=min(bbox[2]),
maxz=max(bbox[2]),
min_nodes=min_nodes,
min_cable=min_cable)
return remote_instance.fetch(url)
@cache.undo_on_error
def get_user_list(remote_instance=None):
"""Get list of users.
Parameters
----------
remote_instance : CatmaidInstance
If not passed directly, will try using global.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a user::
id login full_name first_name last_name color
0
1
...
Examples
--------
>>> user_list = pymaid.get_user_list()
>>> # To search for e.g. user ID 22
>>> user_list.set_index('id', inplace=True)
>>> user_list.loc[22]
id 22
login mustermannm
full_name Michaela Mustermann
first_name Michaela
last_name Mustermann
color [0.91389, 0.877853, 1.0]
>>> user_list.reset_index(inplace=True)
>>> # To convert into a classic dict
>>> d = user_list.set_index('id').T.to_dict()
>>> d[22]['first_name']
'Michaela'
"""
remote_instance = utils._eval_remote_instance(remote_instance)
user_list = remote_instance.fetch(remote_instance._get_user_list_url())
# It appears that for public CATMAID instances (like VFB) where the users
# are masked, the user-list endpoint can return just a single dictionary
# instead of a list of dicts.
if isinstance(user_list, dict):
user_list = [user_list]
# The user list can contain different entries
# Here we define alternative field names
columns = [('id', 'userid'),
('login', 'username'),
('full_name', 'long_name'),
('first_name', ),
('last_name', ),
('color', )]
data = []
for user in user_list:
row = []
for col in columns:
value = None
for key in col:
if key in user:
value = user[key]
break
row.append(value)
data.append(row)
df = pd.DataFrame(data, columns=[c[0] for c in columns])
df.sort_values(['login', 'id'], inplace=True)
df.reset_index(inplace=True, drop=True)
return df
@cache.undo_on_error
def get_paths(sources, targets, n_hops=2, min_synapses=1, return_graph=False,
remove_isolated=False, remote_instance=None):
"""Fetch paths between two sets of neurons.
Parameters
----------
sources
Source neurons.
targets
Target neurons. ``sources`` and ``targets`` can be:
1. list of skeleton ID(s) (int or str)
2. list of neuron name(s) (str, exact match)
3. an annotation as e.g. 'annotation:PN right'
4. CatmaidNeuron or CatmaidNeuronList object
n_hops : int | list | range, optional
Number of hops allowed between sources and
targets. Direct connection would be 1 hop.
1. int, e.g. ``n_hops=3`` will return paths with
EXACTLY 3 hops
2. list, e.g. ``n_hops=[2,4]`` will return all
paths with 2 and 4 hops
3. range, e.g. ``n_hops=range(2,4)`` will be converted
to a list and return paths with 2 and 3 hops.
min_synapses : int, optional
Minimum number of synapses between source and target.
return_graph : bool, optional
If True, will return NetworkX Graph (see below).
remove_isolated : bool, optional
Remove isolated nodes from NetworkX Graph. Only
relevant if ``return_graph=True``.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
paths : list
List of skeleton IDs that constitute paths from
sources to targets::
[[source1, ..., target1], [source2, ..., target2], ...]
networkx.DiGraph
Only if ``return_graph=True``. Graph contains all neurons that
connect sources and targets. **Important**: Does only contain
edges that connect sources and targets via max ``n_hops``!
Other edges have been removed.
Examples
--------
>>> # This assumes that you have already set up a CatmaidInstance
>>> import networkx as nx
>>> import matplotlib.pyplot as plt
>>> g, paths = pymaid.get_paths(['annotation:glomerulus DA1'],
... ['2333007'])
>>> g
<networkx.classes.digraph.DiGraph at 0x127d12390>
>>> paths
[['57381', '4376732', '2333007'], ['57323', '630823', '2333007'], ...
>>> nx.draw(g)
>>> plt.show()
"""
remote_instance = utils._eval_remote_instance(remote_instance)
sources = utils.eval_skids(sources, remote_instance=remote_instance)
targets = utils.eval_skids(targets, remote_instance=remote_instance)
targets = utils._make_iterable(targets).astype(int)
sources = utils._make_iterable(sources).astype(int)
if isinstance(n_hops, (int, np.integer)):
n_hops = [n_hops]
if not utils._is_iterable(n_hops):
raise TypeError('Expected `n_hops` to be iterable or integer, got '
f'"{type(n_hops)}"')
if min(n_hops) <= 0:
raise ValueError('n_hops must not be <= 0')
# We need to query two endpoints:
# First get the neurons involved
response = []
url = remote_instance._get_graph_dps_url()
for h in range(1, max(n_hops) + 1):
if h == 1:
response += list(sources) + list(targets)
continue
post_data = {
'n_hops': h,
'min_synapses': min_synapses
}
for i, s in enumerate(sources):
post_data['sources[%i]' % i] = s
for i, t in enumerate(targets):
post_data['targets[%i]' % i] = t
# Response is just a set of skeleton IDs
response += remote_instance.fetch(url, post=post_data)
# Get unique edges
skids = np.unique(np.asarray(response).astype(int))
# Now get edges between those neurons
edges = get_edges(skids, remote_instance=remote_instance)
# Turn neurons into an NetworkX graph
g = ns.network2nx(edges, threshold=min_synapses)
# Get all paths between sources and targets
all_paths = [p for s in sources for t in targets for p in
nx.all_simple_paths(g, s, t,
cutoff=max(n_hops)) if len(p) - 1 in n_hops]
if not return_graph:
return all_paths
# Turn into edges
edges_to_keep = set([e for l in all_paths for e in nx.utils.pairwise(l)])
# Remove edges
g.remove_edges_from([e for e in g.edges if e not in edges_to_keep])
if remove_isolated:
# Remove isolated nodes
g.remove_nodes_from(list(nx.isolates(g)))
return all_paths, g
@cache.undo_on_error
def get_volume(volume_name=None, color=(120, 120, 120, .6), combine_vols=False,
remote_instance=None):
"""Retrieves volume (mesh).
Parameters
----------
volume_name : int | str | list of str or int
Name(s) (as ``str``) or ID (as ``int``) of the volume
to import. Names must be EXACT!
If ``volume_name=None``, will return list of all
available CATMAID volumes. If list of volume names,
will return a dictionary ``{name: Volume, ... }``
color : tuple, optional
(R, G, B, alpha) values used by :func:`~pymaid.plot3d`.
combine_vols : bool, optional
If True and multiple volumes are requested, they will
be combined into a single volume.
remote_instance : CATMAIDInstance, optional
If not passed directly, will try using global.
Returns
-------
navis.Volume
If ``volume_name`` is list of volumes, returns a dictionary of
Volumes: ``{name1: Volume1, name2: Volume2, ...}``
Examples
--------
>>> import pymaid
>>> rm = CatmaidInstance('server_url', 'api_token', 'http_user', 'http_pw')
>>> # Retrieve volume
>>> vol = pymaid.get_volume('LH_R')
>>> # Plot volume
>>> vol.plot3d()
"""
remote_instance = utils._eval_remote_instance(remote_instance)
if isinstance(volume_name, type(None)):
logger.info('Retrieving list of available volumes.')
elif not isinstance(volume_name, (int, str, list, np.ndarray)):
raise TypeError('Volume name must be id (int), str or list of either, not {}.'.format(type(volume_name)))
volume_names = utils._make_iterable(volume_name)
# First, get volume IDs
get_volumes_url = remote_instance._get_volumes()
response = remote_instance.fetch(get_volumes_url)
all_vols = pd.DataFrame(response['data'], columns=response['columns'])
if isinstance(volume_name, type(None)):
return all_vols
req_vols = all_vols[(all_vols.name.isin(volume_names)) |
(all_vols.id.isin(volume_names))]
volume_ids = req_vols.id.values
if len(volume_ids) < len(volume_names):
not_found = set(volume_names).difference(set(all_vols.name) |
set(all_vols.id))
raise Exception(
'No volume(s) found for: {}'.format(','.join(not_found)))
url_list = [remote_instance._get_volume_details(v) for v in volume_ids]
# Get data
responses = remote_instance.fetch(url_list, desc='Volumes')
# Generate volume(s) from responses
volumes = {}
for r in responses:
mesh_str = r['mesh']
mesh_name = r['name']
mesh_id = r['id']
mesh_type = re.search('<(.*?) ', mesh_str).group(1)
# Now reverse engineer the mesh
if mesh_type == 'IndexedTriangleSet':
t = re.search("index='(.*?)'", mesh_str).group(1).split(' ')
faces = [(int(t[i]), int(t[i + 1]), int(t[i + 2]))
for i in range(0, len(t) - 2, 3)]
v = re.search("point='(.*?)'", mesh_str).group(1).split(' ')
vertices = [(float(v[i]), float(v[i + 1]), float(v[i + 2]))
for i in range(0, len(v) - 2, 3)]
elif mesh_type == 'IndexedFaceSet':
# For this type, each face is indexed and an index of -1 indicates
# the end of this face set
t = re.search("coordIndex='(.*?)'", mesh_str).group(1).split(' ')
faces = []
this_face = []
for f in t:
if int(f) != -1:
this_face.append(int(f))
else:
faces.append(this_face)
this_face = []
# Make sure the last face is also appended
faces.append(this_face)
v = re.search("point='(.*?)'", mesh_str).group(1).split(' ')
vertices = [(float(v[i]), float(v[i + 1]), float(v[i + 2]))
for i in range(0, len(v) - 2, 3)]
else:
logger.error("Unknown volume type: %s" % mesh_type)
raise Exception("Unknown volume type: %s" % mesh_type)
# In this format vertices are not unique - i.e. a given vertex defined
# by its x/y/z position shows up as many times as it participates in
# a face.
# Fortunately, navis.Volume being a subclass of trimesh.Trimesh takes
# care of the deduplication
v = ns.Volume(name=mesh_name,
volume_id=mesh_id,
vertices=vertices,
faces=faces,
color=color)
volumes[mesh_name] = v
# Return just the volume if a single one was requested
if len(volumes) == 1:
return list(volumes.values())[0]
return volumes
@cache.undo_on_error
def get_annotation_list(remote_instance=None):
"""Get a list of all annotations in the project.
Parameters
----------
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
pandas DataFrame
DataFrame in which each row represents an annotation::
name id users
0
1
...
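Examples
--------
A minimal sketch (assumes a globally defined CatmaidInstance; the search
string is illustrative):
>>> an = pymaid.get_annotation_list()
>>> # Filter for annotations containing a given string
>>> an[an.name.str.contains('glomerulus')]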
"""
remote_instance = utils._eval_remote_instance(remote_instance)
an = remote_instance.fetch(remote_instance._get_annotation_list())[
'annotations']
df = pd.DataFrame.from_dict(an)
return df
def url_to_coordinates(coords, stack_id, active_skeleton_id=None,
active_node_id=None, zoom=0, tool='tracingtool',
open_browser=False, remote_instance=None):
"""Generate URL to a location.
Parameters
----------
coords : list | np.ndarray | pandas.DataFrame
``x``, ``y``, ``z`` coordinates.
stack_id : int | list/array of ints
ID of the image stack you want to link to.
Depending on your setup this parameter might be
overridden by local user settings.
active_skeleton_id : int | list/array of ints, optional
Skeleton ID of the neuron that should be selected.
active_node_id : int | list/array of ints, optional
Node/Connector ID of the node that should be
active.
zoom : int, optional
tool : str, optional
open_browser : bool, optional
If True will open *all* generated URLs as new
tabs in the standard webbrowser.
remote_instance : CatmaidInstance, optional
If not passed directly, will try using global.
Returns
-------
str | list of str
URL(s) to the coordinates provided.
Examples
--------
>>> # Get URL for a single coordinate
>>> url = pymaid.url_to_coordinates([1000, 1000, 1000], stack_id=5)
>>> # Get URLs for all low-confidence nodes of a neuron
>>> n = pymaid.get_neuron(27295)
>>> low_c = n.nodes.loc[n.nodes.confidence < 5]
>>> urls = pymaid.url_to_coordinates(low_c[['x', 'y', 'z']].values,
... stack_id=5,
... active_node_id=low_c.node_id.values)
"""
def gen_url(c, stid, nid, sid):
""" This function generates the actual urls
"""
GET_data = {'pid': remote_instance.project_id,
'xp': int(c[0]),
'yp': int(c[1]),
'zp': int(c[2]),
'tool': tool,
'sid0': stid,
's0': zoom
}
if sid:
GET_data['active_skeleton_id'] = sid
if nid:
GET_data['active_node_id'] = nid
return remote_instance.make_url('?%s' % urllib.parse.urlencode(GET_data))
def list_helper(x):
""" Helper function to turn variables into lists matching length of coordinates
"""
if not isinstance(x, (list, np.ndarray)):
return [x] * len(coords)
elif len(x) != len(coords):
raise ValueError('Parameters must be the same shape as coords.')
else:
return x
remote_instance = utils._eval_remote_instance(remote_instance)
if isinstance(coords, (pd.DataFrame, pd.Series)):
try:
coords = coords[['x', 'y', 'z']].values
except BaseException:
raise ValueError(
'Pandas DataFrames must have "x","y" and "z" columns.')
elif isinstance(coords, list):
coords = np.array(coords)
if isinstance(coords, np.ndarray) and coords.ndim > 1:
stack_id = list_helper(stack_id)
active_skeleton_id = list_helper(active_skeleton_id)
active_node_id = list_helper(active_node_id)
urls = [gen_url(c, stid, nid, sid) for c, stid, nid, sid in zip(coords, stack_id, active_node_id, active_skeleton_id)]
if open_browser:
for u in urls:
webbrowser.open_new_tab(u)
return urls
else:
url = gen_url(coords, stack_id, active_node_id, active_skeleton_id)
if open_browser:
webbrowser.open_new_tab(url)
return url
@cache.undo_on_error
def get_node_location(x, sort=True, remote_instance=None):
"""Retrieves location for a set of nodes or connectors.
Parameters
----------
x : int | list of int
Node ID(s).
sort : bool, optional
If True, will sort returned DataFrame to be in the same
order as input data.
remote_instance : CatmaidInstance, optional
If not provided, will search for globally defined
remote instance.
Returns
-------
pandas.DataFrame
DataFrame in which each row represents a node::
node_id x y z
0
1
...
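Examples
--------
A minimal sketch (the node IDs below are placeholders):
>>> locs = pymaid.get_node_location([123456, 123457])
>>> locs[['x', 'y', 'z']].values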
"""
remote_instance = utils._eval_remote_instance(remote_instance)
x = utils.eval_node_ids(x, connectors=True, nodes=True)
url = remote_instance._get_node_location_url()
post = {'node_ids[{}]'.format(i): n for i, n in enumerate(x)}
# This file was created automatically by SWIG 1.3.29.
# Don't modify this file, modify the SWIG interface instead.
import _richtext
import new
new_instancemethod = new.instancemethod
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'PySwigObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static) or hasattr(self,name):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError,name
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
import types
try:
_object = types.ObjectType
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
del types
def _swig_setattr_nondynamic_method(set):
def set_attr(self,name,value):
if (name == "thisown"): return self.this.own(value)
if hasattr(self,name) or (name == "this"):
set(self,name,value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
USE_TEXTATTREX = _richtext.USE_TEXTATTREX
import _windows
import _core
wx = _core
__docfilter__ = wx.__DocFilter(globals())
TEXT_ALIGNMENT_DEFAULT = _richtext.TEXT_ALIGNMENT_DEFAULT
TEXT_ALIGNMENT_LEFT = _richtext.TEXT_ALIGNMENT_LEFT
TEXT_ALIGNMENT_CENTRE = _richtext.TEXT_ALIGNMENT_CENTRE
TEXT_ALIGNMENT_CENTER = _richtext.TEXT_ALIGNMENT_CENTER
TEXT_ALIGNMENT_RIGHT = _richtext.TEXT_ALIGNMENT_RIGHT
TEXT_ALIGNMENT_JUSTIFIED = _richtext.TEXT_ALIGNMENT_JUSTIFIED
#---------------------------------------------------------------------------
RICHTEXT_TYPE_ANY = _richtext.RICHTEXT_TYPE_ANY
RICHTEXT_TYPE_TEXT = _richtext.RICHTEXT_TYPE_TEXT
RICHTEXT_TYPE_XML = _richtext.RICHTEXT_TYPE_XML
RICHTEXT_TYPE_HTML = _richtext.RICHTEXT_TYPE_HTML
RICHTEXT_TYPE_RTF = _richtext.RICHTEXT_TYPE_RTF
RICHTEXT_TYPE_PDF = _richtext.RICHTEXT_TYPE_PDF
RICHTEXT_FIXED_WIDTH = _richtext.RICHTEXT_FIXED_WIDTH
RICHTEXT_FIXED_HEIGHT = _richtext.RICHTEXT_FIXED_HEIGHT
RICHTEXT_VARIABLE_WIDTH = _richtext.RICHTEXT_VARIABLE_WIDTH
RICHTEXT_VARIABLE_HEIGHT = _richtext.RICHTEXT_VARIABLE_HEIGHT
RICHTEXT_LAYOUT_SPECIFIED_RECT = _richtext.RICHTEXT_LAYOUT_SPECIFIED_RECT
RICHTEXT_DRAW_IGNORE_CACHE = _richtext.RICHTEXT_DRAW_IGNORE_CACHE
RICHTEXT_HITTEST_NONE = _richtext.RICHTEXT_HITTEST_NONE
RICHTEXT_HITTEST_BEFORE = _richtext.RICHTEXT_HITTEST_BEFORE
RICHTEXT_HITTEST_AFTER = _richtext.RICHTEXT_HITTEST_AFTER
RICHTEXT_HITTEST_ON = _richtext.RICHTEXT_HITTEST_ON
RICHTEXT_HITTEST_OUTSIDE = _richtext.RICHTEXT_HITTEST_OUTSIDE
RICHTEXT_FORMATTED = _richtext.RICHTEXT_FORMATTED
RICHTEXT_UNFORMATTED = _richtext.RICHTEXT_UNFORMATTED
RICHTEXT_CACHE_SIZE = _richtext.RICHTEXT_CACHE_SIZE
RICHTEXT_HEIGHT_ONLY = _richtext.RICHTEXT_HEIGHT_ONLY
RICHTEXT_SETSTYLE_NONE = _richtext.RICHTEXT_SETSTYLE_NONE
RICHTEXT_SETSTYLE_WITH_UNDO = _richtext.RICHTEXT_SETSTYLE_WITH_UNDO
RICHTEXT_SETSTYLE_OPTIMIZE = _richtext.RICHTEXT_SETSTYLE_OPTIMIZE
RICHTEXT_SETSTYLE_PARAGRAPHS_ONLY = _richtext.RICHTEXT_SETSTYLE_PARAGRAPHS_ONLY
RICHTEXT_SETSTYLE_CHARACTERS_ONLY = _richtext.RICHTEXT_SETSTYLE_CHARACTERS_ONLY
RICHTEXT_SETSTYLE_RENUMBER = _richtext.RICHTEXT_SETSTYLE_RENUMBER
RICHTEXT_SETSTYLE_SPECIFY_LEVEL = _richtext.RICHTEXT_SETSTYLE_SPECIFY_LEVEL
RICHTEXT_SETSTYLE_RESET = _richtext.RICHTEXT_SETSTYLE_RESET
RICHTEXT_SETSTYLE_REMOVE = _richtext.RICHTEXT_SETSTYLE_REMOVE
RICHTEXT_INSERT_NONE = _richtext.RICHTEXT_INSERT_NONE
RICHTEXT_INSERT_WITH_PREVIOUS_PARAGRAPH_STYLE = _richtext.RICHTEXT_INSERT_WITH_PREVIOUS_PARAGRAPH_STYLE
RICHTEXT_INSERT_INTERACTIVE = _richtext.RICHTEXT_INSERT_INTERACTIVE
TEXT_ATTR_TEXT_COLOUR = _richtext.TEXT_ATTR_TEXT_COLOUR
TEXT_ATTR_BACKGROUND_COLOUR = _richtext.TEXT_ATTR_BACKGROUND_COLOUR
TEXT_ATTR_FONT_FACE = _richtext.TEXT_ATTR_FONT_FACE
TEXT_ATTR_FONT_SIZE = _richtext.TEXT_ATTR_FONT_SIZE
TEXT_ATTR_FONT_WEIGHT = _richtext.TEXT_ATTR_FONT_WEIGHT
TEXT_ATTR_FONT_ITALIC = _richtext.TEXT_ATTR_FONT_ITALIC
TEXT_ATTR_FONT_UNDERLINE = _richtext.TEXT_ATTR_FONT_UNDERLINE
TEXT_ATTR_FONT = _richtext.TEXT_ATTR_FONT
TEXT_ATTR_ALIGNMENT = _richtext.TEXT_ATTR_ALIGNMENT
TEXT_ATTR_LEFT_INDENT = _richtext.TEXT_ATTR_LEFT_INDENT
TEXT_ATTR_RIGHT_INDENT = _richtext.TEXT_ATTR_RIGHT_INDENT
TEXT_ATTR_TABS = _richtext.TEXT_ATTR_TABS
TEXT_ATTR_PARA_SPACING_AFTER = _richtext.TEXT_ATTR_PARA_SPACING_AFTER
TEXT_ATTR_PARA_SPACING_BEFORE = _richtext.TEXT_ATTR_PARA_SPACING_BEFORE
TEXT_ATTR_LINE_SPACING = _richtext.TEXT_ATTR_LINE_SPACING
TEXT_ATTR_CHARACTER_STYLE_NAME = _richtext.TEXT_ATTR_CHARACTER_STYLE_NAME
TEXT_ATTR_PARAGRAPH_STYLE_NAME = _richtext.TEXT_ATTR_PARAGRAPH_STYLE_NAME
TEXT_ATTR_BULLET_STYLE = _richtext.TEXT_ATTR_BULLET_STYLE
TEXT_ATTR_BULLET_NUMBER = _richtext.TEXT_ATTR_BULLET_NUMBER
TEXT_ATTR_BULLET_TEXT = _richtext.TEXT_ATTR_BULLET_TEXT
TEXT_ATTR_BULLET_NAME = _richtext.TEXT_ATTR_BULLET_NAME
TEXT_ATTR_URL = _richtext.TEXT_ATTR_URL
TEXT_ATTR_PAGE_BREAK = _richtext.TEXT_ATTR_PAGE_BREAK
TEXT_ATTR_EFFECTS = _richtext.TEXT_ATTR_EFFECTS
TEXT_ATTR_OUTLINE_LEVEL = _richtext.TEXT_ATTR_OUTLINE_LEVEL
TEXT_ATTR_KEEP_FIRST_PARA_STYLE = _richtext.TEXT_ATTR_KEEP_FIRST_PARA_STYLE
TEXT_ATTR_BULLET_STYLE_NONE = _richtext.TEXT_ATTR_BULLET_STYLE_NONE
TEXT_ATTR_BULLET_STYLE_ARABIC = _richtext.TEXT_ATTR_BULLET_STYLE_ARABIC
TEXT_ATTR_BULLET_STYLE_LETTERS_UPPER = _richtext.TEXT_ATTR_BULLET_STYLE_LETTERS_UPPER
TEXT_ATTR_BULLET_STYLE_LETTERS_LOWER = _richtext.TEXT_ATTR_BULLET_STYLE_LETTERS_LOWER
TEXT_ATTR_BULLET_STYLE_ROMAN_UPPER = _richtext.TEXT_ATTR_BULLET_STYLE_ROMAN_UPPER
TEXT_ATTR_BULLET_STYLE_ROMAN_LOWER = _richtext.TEXT_ATTR_BULLET_STYLE_ROMAN_LOWER
TEXT_ATTR_BULLET_STYLE_SYMBOL = _richtext.TEXT_ATTR_BULLET_STYLE_SYMBOL
TEXT_ATTR_BULLET_STYLE_BITMAP = _richtext.TEXT_ATTR_BULLET_STYLE_BITMAP
TEXT_ATTR_BULLET_STYLE_PARENTHESES = _richtext.TEXT_ATTR_BULLET_STYLE_PARENTHESES
TEXT_ATTR_BULLET_STYLE_PERIOD = _richtext.TEXT_ATTR_BULLET_STYLE_PERIOD
TEXT_ATTR_BULLET_STYLE_STANDARD = _richtext.TEXT_ATTR_BULLET_STYLE_STANDARD
TEXT_ATTR_BULLET_STYLE_RIGHT_PARENTHESIS = _richtext.TEXT_ATTR_BULLET_STYLE_RIGHT_PARENTHESIS
TEXT_ATTR_BULLET_STYLE_OUTLINE = _richtext.TEXT_ATTR_BULLET_STYLE_OUTLINE
TEXT_ATTR_BULLET_STYLE_ALIGN_LEFT = _richtext.TEXT_ATTR_BULLET_STYLE_ALIGN_LEFT
TEXT_ATTR_BULLET_STYLE_ALIGN_RIGHT = _richtext.TEXT_ATTR_BULLET_STYLE_ALIGN_RIGHT
TEXT_ATTR_BULLET_STYLE_ALIGN_CENTRE = _richtext.TEXT_ATTR_BULLET_STYLE_ALIGN_CENTRE
TEXT_ATTR_EFFECT_NONE = _richtext.TEXT_ATTR_EFFECT_NONE
TEXT_ATTR_EFFECT_CAPITALS = _richtext.TEXT_ATTR_EFFECT_CAPITALS
TEXT_ATTR_EFFECT_SMALL_CAPITALS = _richtext.TEXT_ATTR_EFFECT_SMALL_CAPITALS
TEXT_ATTR_EFFECT_STRIKETHROUGH = _richtext.TEXT_ATTR_EFFECT_STRIKETHROUGH
TEXT_ATTR_EFFECT_DOUBLE_STRIKETHROUGH = _richtext.TEXT_ATTR_EFFECT_DOUBLE_STRIKETHROUGH
TEXT_ATTR_EFFECT_SHADOW = _richtext.TEXT_ATTR_EFFECT_SHADOW
TEXT_ATTR_EFFECT_EMBOSS = _richtext.TEXT_ATTR_EFFECT_EMBOSS
TEXT_ATTR_EFFECT_OUTLINE = _richtext.TEXT_ATTR_EFFECT_OUTLINE
TEXT_ATTR_EFFECT_ENGRAVE = _richtext.TEXT_ATTR_EFFECT_ENGRAVE
TEXT_ATTR_EFFECT_SUPERSCRIPT = _richtext.TEXT_ATTR_EFFECT_SUPERSCRIPT
TEXT_ATTR_EFFECT_SUBSCRIPT = _richtext.TEXT_ATTR_EFFECT_SUBSCRIPT
TEXT_ATTR_LINE_SPACING_NORMAL = _richtext.TEXT_ATTR_LINE_SPACING_NORMAL
TEXT_ATTR_LINE_SPACING_HALF = _richtext.TEXT_ATTR_LINE_SPACING_HALF
TEXT_ATTR_LINE_SPACING_TWICE = _richtext.TEXT_ATTR_LINE_SPACING_TWICE
TEXT_ATTR_CHARACTER = _richtext.TEXT_ATTR_CHARACTER
TEXT_ATTR_PARAGRAPH = _richtext.TEXT_ATTR_PARAGRAPH
TEXT_ATTR_ALL = _richtext.TEXT_ATTR_ALL
#---------------------------------------------------------------------------
class RichTextRange(object):
"""
RichTextRange is a data structure that represents a range of text
within a `RichTextCtrl`. It simply contains integer ``start`` and
``end`` properties and a few operations useful for dealing with
ranges. In most places in wxPython where a RichTextRange is expected a
2-tuple containing (start, end) can be used instead.
"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
"""
__init__(self, long start=0, long end=0) -> RichTextRange
Creates a new range object.
"""
_richtext.RichTextRange_swiginit(self,_richtext.new_RichTextRange(*args, **kwargs))
__swig_destroy__ = _richtext.delete_RichTextRange
__del__ = lambda self : None;
def __eq__(*args, **kwargs):
"""
__eq__(self, PyObject other) -> bool
Test for equality of RichTextRange objects.
"""
return _richtext.RichTextRange___eq__(*args, **kwargs)
def __sub__(*args, **kwargs):
"""__sub__(self, RichTextRange range) -> RichTextRange"""
return _richtext.RichTextRange___sub__(*args, **kwargs)
def __add__(*args, **kwargs):
"""__add__(self, RichTextRange range) -> RichTextRange"""
return _richtext.RichTextRange___add__(*args, **kwargs)
def SetRange(*args, **kwargs):
"""SetRange(self, long start, long end)"""
return _richtext.RichTextRange_SetRange(*args, **kwargs)
def SetStart(*args, **kwargs):
"""SetStart(self, long start)"""
return _richtext.RichTextRange_SetStart(*args, **kwargs)
def GetStart(*args, **kwargs):
"""GetStart(self) -> long"""
return _richtext.RichTextRange_GetStart(*args, **kwargs)
start = property(GetStart, SetStart)
def SetEnd(*args, **kwargs):
"""SetEnd(self, long end)"""
return _richtext.RichTextRange_SetEnd(*args, **kwargs)
def GetEnd(*args, **kwargs):
"""GetEnd(self) -> long"""
return _richtext.RichTextRange_GetEnd(*args, **kwargs)
end = property(GetEnd, SetEnd)
def IsOutside(*args, **kwargs):
"""
IsOutside(self, RichTextRange range) -> bool
Returns true if this range is completely outside 'range'
"""
return _richtext.RichTextRange_IsOutside(*args, **kwargs)
def IsWithin(*args, **kwargs):
"""
IsWithin(self, RichTextRange range) -> bool
Returns true if this range is completely within 'range'
"""
return _richtext.RichTextRange_IsWithin(*args, **kwargs)
def Contains(*args, **kwargs):
"""
Contains(self, long pos) -> bool
Returns true if the given position is within this range. Allow for the
possibility of an empty range - assume the position is within this
empty range.
"""
return _richtext.RichTextRange_Contains(*args, **kwargs)
def LimitTo(*args, **kwargs):
"""
LimitTo(self, RichTextRange range) -> bool
Limit this range to be within 'range'
"""
return _richtext.RichTextRange_LimitTo(*args, **kwargs)
def GetLength(*args, **kwargs):
"""
GetLength(self) -> long
Gets the length of the range
"""
return _richtext.RichTextRange_GetLength(*args, **kwargs)
def Swap(*args, **kwargs):
"""
Swap(self)
Swaps the start and end
"""
return _richtext.RichTextRange_Swap(*args, **kwargs)
def ToInternal(*args, **kwargs):
"""
ToInternal(self) -> RichTextRange
Convert to internal form: (n, n) is the range of a single character.
"""
return _richtext.RichTextRange_ToInternal(*args, **kwargs)
def FromInternal(*args, **kwargs):
"""
FromInternal(self) -> RichTextRange
Convert from internal to public API form: (n, n+1) is the range of a
single character.
"""
return _richtext.RichTextRange_FromInternal(*args, **kwargs)
def Get(*args, **kwargs):
"""
Get() -> (start,end)
Returns the start and end properties as a tuple.
"""
return _richtext.RichTextRange_Get(*args, **kwargs)
def __str__(self): return str(self.Get())
def __repr__(self): return 'RichTextRange'+str(self.Get())
def __len__(self): return len(self.Get())
def __getitem__(self, index): return self.Get()[index]
def __setitem__(self, index, val):
if index == 0: self.start = val
elif index == 1: self.end = val
else: raise IndexError
def __nonzero__(self): return self.Get() != (0,0)
__safe_for_unpickling__ = True
def __reduce__(self): return (RichTextRange, self.Get())
End = property(GetEnd,SetEnd,doc="See `GetEnd` and `SetEnd`")
Length = property(GetLength,doc="See `GetLength`")
Start = property(GetStart,SetStart,doc="See `GetStart` and `SetStart`")
_richtext.RichTextRange_swigregister(RichTextRange)
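# A minimal usage sketch (values are illustrative). A RichTextRange can be
# constructed explicitly, and in most wxPython APIs a (start, end) tuple is
# accepted in its place:
#
#   r = RichTextRange(0, 10)
#   r.Contains(5)    # -> True
#   r.GetLength()    # length of the range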
#---------------------------------------------------------------------------
class TextAttrEx(object):
"""
The TextAttrEx class stores information about the various attributes
for a block of text, including font, colour, indents, alignments, etc.
"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
"""
__init__(self) -> TextAttrEx
The TextAttrEx class stores information about the various attributes
for a block of text, including font, colour, indents, alignments, etc.
"""
_richtext.TextAttrEx_swiginit(self,_richtext.new_TextAttrEx(*args, **kwargs))
__swig_destroy__ = _richtext.delete_TextAttrEx
__del__ = lambda self : None;
def Init(*args, **kwargs):
"""Init(self)"""
return _richtext.TextAttrEx_Init(*args, **kwargs)
def Copy(*args, **kwargs):
"""Copy(self, TextAttrEx attr)"""
return _richtext.TextAttrEx_Copy(*args, **kwargs)
def SetTextColour(*args, **kwargs):
"""SetTextColour(self, Colour colText)"""
return _richtext.TextAttrEx_SetTextColour(*args, **kwargs)
def SetBackgroundColour(*args, **kwargs):
"""SetBackgroundColour(self, Colour colBack)"""
return _richtext.TextAttrEx_SetBackgroundColour(*args, **kwargs)
def SetFont(*args, **kwargs):
"""SetFont(self, Font font, long flags=TEXT_ATTR_FONT)"""
return _richtext.TextAttrEx_SetFont(*args, **kwargs)
def SetAlignment(*args, **kwargs):
"""SetAlignment(self, int alignment)"""
return _richtext.TextAttrEx_SetAlignment(*args, **kwargs)
def SetTabs(*args, **kwargs):
"""SetTabs(self, wxArrayInt tabs)"""
return _richtext.TextAttrEx_SetTabs(*args, **kwargs)
def SetLeftIndent(*args, **kwargs):
"""SetLeftIndent(self, int indent, int subIndent=0)"""
return _richtext.TextAttrEx_SetLeftIndent(*args, **kwargs)
def SetRightIndent(*args, **kwargs):
"""SetRightIndent(self, int indent)"""
return _richtext.TextAttrEx_SetRightIndent(*args, **kwargs)
def SetFlags(*args, **kwargs):
"""SetFlags(self, long flags)"""
return _richtext.TextAttrEx_SetFlags(*args, **kwargs)
def HasTextColour(*args, **kwargs):
"""HasTextColour(self) -> bool"""
return _richtext.TextAttrEx_HasTextColour(*args, **kwargs)
def HasBackgroundColour(*args, **kwargs):
"""HasBackgroundColour(self) -> bool"""
return _richtext.TextAttrEx_HasBackgroundColour(*args, **kwargs)
def HasFont(*args, **kwargs):
"""HasFont(self) -> bool"""
return _richtext.TextAttrEx_HasFont(*args, **kwargs)
def HasAlignment(*args, **kwargs):
"""HasAlignment(self) -> bool"""
return _richtext.TextAttrEx_HasAlignment(*args, **kwargs)
def HasTabs(*args, **kwargs):
"""HasTabs(self) -> bool"""
return _richtext.TextAttrEx_HasTabs(*args, **kwargs)
def HasLeftIndent(*args, **kwargs):
"""HasLeftIndent(self) -> bool"""
return _richtext.TextAttrEx_HasLeftIndent(*args, **kwargs)
def HasRightIndent(*args, **kwargs):
"""HasRightIndent(self) -> bool"""
return _richtext.TextAttrEx_HasRightIndent(*args, **kwargs)
def HasFlag(*args, **kwargs):
"""HasFlag(self, long flag) -> bool"""
return _richtext.TextAttrEx_HasFlag(*args, **kwargs)
def GetTextColour(*args, **kwargs):
"""GetTextColour(self) -> Colour"""
return _richtext.TextAttrEx_GetTextColour(*args, **kwargs)
def GetBackgroundColour(*args, **kwargs):
"""GetBackgroundColour(self) -> Colour"""
return _richtext.TextAttrEx_GetBackgroundColour(*args, **kwargs)
def GetFont(*args, **kwargs):
"""GetFont(self) -> Font"""
return _richtext.TextAttrEx_GetFont(*args, **kwargs)
def GetAlignment(*args, **kwargs):
"""GetAlignment(self) -> int"""
return _richtext.TextAttrEx_GetAlignment(*args, **kwargs)
def GetTabs(*args, **kwargs):
"""GetTabs(self) -> wxArrayInt"""
return _richtext.TextAttrEx_GetTabs(*args, **kwargs)
def GetLeftIndent(*args, **kwargs):
"""GetLeftIndent(self) -> long"""
return _richtext.TextAttrEx_GetLeftIndent(*args, **kwargs)
def GetLeftSubIndent(*args, **kwargs):
"""GetLeftSubIndent(self) -> long"""
return _richtext.TextAttrEx_GetLeftSubIndent(*args, **kwargs)
def GetRightIndent(*args, **kwargs):
"""GetRightIndent(self) -> long"""
return _richtext.TextAttrEx_GetRightIndent(*args, **kwargs)
def GetFlags(*args, **kwargs):
"""GetFlags(self) -> long"""
return _richtext.TextAttrEx_GetFlags(*args, **kwargs)
def SetCharacterStyleName(*args, **kwargs):
"""SetCharacterStyleName(self, String name)"""
return _richtext.TextAttrEx_SetCharacterStyleName(*args, **kwargs)
def SetParagraphStyleName(*args, **kwargs):
"""SetParagraphStyleName(self, String name)"""
return _richtext.TextAttrEx_SetParagraphStyleName(*args, **kwargs)
def SetListStyleName(*args, **kwargs):
"""SetListStyleName(self, String name)"""
return _richtext.TextAttrEx_SetListStyleName(*args, **kwargs)
def SetParagraphSpacingAfter(*args, **kwargs):
"""SetParagraphSpacingAfter(self, int spacing)"""
return _richtext.TextAttrEx_SetParagraphSpacingAfter(*args, **kwargs)
def SetParagraphSpacingBefore(*args, **kwargs):
"""SetParagraphSpacingBefore(self, int spacing)"""
return _richtext.TextAttrEx_SetParagraphSpacingBefore(*args, **kwargs)
def SetLineSpacing(*args, **kwargs):
"""SetLineSpacing(self, int spacing)"""
return _richtext.TextAttrEx_SetLineSpacing(*args, **kwargs)
def SetBulletStyle(*args, **kwargs):
"""SetBulletStyle(self, int style)"""
return _richtext.TextAttrEx_SetBulletStyle(*args, **kwargs)
def SetBulletNumber(*args, **kwargs):
"""SetBulletNumber(self, int n)"""
return _richtext.TextAttrEx_SetBulletNumber(*args, **kwargs)
def SetBulletText(*args, **kwargs):
"""SetBulletText(self, String text)"""
return _richtext.TextAttrEx_SetBulletText(*args, **kwargs)
def SetBulletName(*args, **kwargs):
"""SetBulletName(self, String name)"""
return _richtext.TextAttrEx_SetBulletName(*args, **kwargs)
def SetBulletFont(*args, **kwargs):
"""SetBulletFont(self, String bulletFont)"""
return _richtext.TextAttrEx_SetBulletFont(*args, **kwargs)
def SetURL(*args, **kwargs):
"""SetURL(self, String url)"""
return _richtext.TextAttrEx_SetURL(*args, **kwargs)
def SetPageBreak(*args, **kwargs):
"""SetPageBreak(self, bool pageBreak=True)"""
return _richtext.TextAttrEx_SetPageBreak(*args, **kwargs)
def SetTextEffects(*args, **kwargs):
"""SetTextEffects(self, int effects)"""
return _richtext.TextAttrEx_SetTextEffects(*args, **kwargs)
def SetTextEffectFlags(*args, **kwargs):
"""SetTextEffectFlags(self, int effects)"""
return _richtext.TextAttrEx_SetTextEffectFlags(*args, **kwargs)
def SetOutlineLevel(*args, **kwargs):
"""SetOutlineLevel(self, int level)"""
return _richtext.TextAttrEx_SetOutlineLevel(*args, **kwargs)
def SetFontSize(*args, **kwargs):
"""SetFontSize(self, int pointSize)"""
return _richtext.TextAttrEx_SetFontSize(*args, **kwargs)
def SetFontStyle(*args, **kwargs):
"""SetFontStyle(self, int fontStyle)"""
return _richtext.TextAttrEx_SetFontStyle(*args, **kwargs)
def SetFontWeight(*args, **kwargs):
"""SetFontWeight(self, int fontWeight)"""
return _richtext.TextAttrEx_SetFontWeight(*args, **kwargs)
def SetFontFaceName(*args, **kwargs):
"""SetFontFaceName(self, String faceName)"""
return _richtext.TextAttrEx_SetFontFaceName(*args, **kwargs)
def SetFontUnderlined(*args, **kwargs):
"""SetFontUnderlined(self, bool underlined)"""
return _richtext.TextAttrEx_SetFontUnderlined(*args, **kwargs)
def GetCharacterStyleName(*args, **kwargs):
"""GetCharacterStyleName(self) -> String"""
return _richtext.TextAttrEx_GetCharacterStyleName(*args, **kwargs)
def GetParagraphStyleName(*args, **kwargs):
"""GetParagraphStyleName(self) -> String"""
return _richtext.TextAttrEx_GetParagraphStyleName(*args, **kwargs)
def GetListStyleName(*args, **kwargs):
"""GetListStyleName(self) -> String"""
return _richtext.TextAttrEx_GetListStyleName(*args, **kwargs)
def GetParagraphSpacingAfter(*args, **kwargs):
"""GetParagraphSpacingAfter(self) -> int"""
return _richtext.TextAttrEx_GetParagraphSpacingAfter(*args, **kwargs)
def GetParagraphSpacingBefore(*args, **kwargs):
"""GetParagraphSpacingBefore(self) -> int"""
return _richtext.TextAttrEx_GetParagraphSpacingBefore(*args, **kwargs)
def GetLineSpacing(*args, **kwargs):
"""GetLineSpacing(self) -> int"""
return _richtext.TextAttrEx_GetLineSpacing(*args, **kwargs)
def GetBulletStyle(*args, **kwargs):
"""GetBulletStyle(self) -> int"""
return _richtext.TextAttrEx_GetBulletStyle(*args, **kwargs)
def GetBulletNumber(*args, **kwargs):
"""GetBulletNumber(self) -> int"""
return _richtext.TextAttrEx_GetBulletNumber(*args, **kwargs)
def GetBulletText(*args, **kwargs):
"""GetBulletText(self) -> String"""
return _richtext.TextAttrEx_GetBulletText(*args, **kwargs)
def GetBulletName(*args, **kwargs):
"""GetBulletName(self) -> String"""
return _richtext.TextAttrEx_GetBulletName(*args, **kwargs)
def GetBulletFont(*args, **kwargs):
"""GetBulletFont(self) -> String"""
return _richtext.TextAttrEx_GetBulletFont(*args, **kwargs)
def GetURL(*args, **kwargs):
"""GetURL(self) -> String"""
return _richtext.TextAttrEx_GetURL(*args, **kwargs)
def GetTextEffects(*args, **kwargs):
"""GetTextEffects(self) -> int"""
return _richtext.TextAttrEx_GetTextEffects(*args, **kwargs)
def GetTextEffectFlags(*args, **kwargs):
"""GetTextEffectFlags(self) -> int"""
return _richtext.TextAttrEx_GetTextEffectFlags(*args, **kwargs)
def GetOutlineLevel(*args, **kwargs):
"""GetOutlineLevel(self) -> int"""
return _richtext.TextAttrEx_GetOutlineLevel(*args, **kwargs)
def HasFontWeight(*args, **kwargs):
"""HasFontWeight(self) -> bool"""
return _richtext.TextAttrEx_HasFontWeight(*args, **kwargs)
def HasFontSize(*args, **kwargs):
"""HasFontSize(self) -> bool"""
return _richtext.TextAttrEx_HasFontSize(*args, **kwargs)
def HasFontItalic(*args, **kwargs):
"""HasFontItalic(self) -> bool"""
return _richtext.TextAttrEx_HasFontItalic(*args, **kwargs)
def HasFontUnderlined(*args, **kwargs):
"""HasFontUnderlined(self) -> bool"""
return _richtext.TextAttrEx_HasFontUnderlined(*args, **kwargs)
def HasFontFaceName(*args, **kwargs):
"""HasFontFaceName(self) -> bool"""
return _richtext.TextAttrEx_HasFontFaceName(*args, **kwargs)
def HasParagraphSpacingAfter(*args, **kwargs):
"""HasParagraphSpacingAfter(self) -> bool"""
return _richtext.TextAttrEx_HasParagraphSpacingAfter(*args, **kwargs)
def HasParagraphSpacingBefore(*args, **kwargs):
"""HasParagraphSpacingBefore(self) -> bool"""
return _richtext.TextAttrEx_HasParagraphSpacingBefore(*args, **kwargs)
def HasLineSpacing(*args, **kwargs):
"""HasLineSpacing(self) -> bool"""
return _richtext.TextAttrEx_HasLineSpacing(*args, **kwargs)
def HasCharacterStyleName(*args, **kwargs):
"""HasCharacterStyleName(self) -> bool"""
return _richtext.TextAttrEx_HasCharacterStyleName(*args, **kwargs)
def HasParagraphStyleName(*args, **kwargs):
"""HasParagraphStyleName(self) -> bool"""
return _richtext.TextAttrEx_HasParagraphStyleName(*args, **kwargs)
def HasListStyleName(*args, **kwargs):
"""HasListStyleName(self) -> bool"""
return _richtext.TextAttrEx_HasListStyleName(*args, **kwargs)
def HasBulletStyle(*args, **kwargs):
"""HasBulletStyle(self) -> bool"""
return _richtext.TextAttrEx_HasBulletStyle(*args, **kwargs)
def HasBulletNumber(*args, **kwargs):
"""HasBulletNumber(self) -> bool"""
return _richtext.TextAttrEx_HasBulletNumber(*args, **kwargs)
def HasBulletText(*args, **kwargs):
"""HasBulletText(self) -> bool"""
return _richtext.TextAttrEx_HasBulletText(*args, **kwargs)
def HasBulletName(*args, **kwargs):
"""HasBulletName(self) -> bool"""
return _richtext.TextAttrEx_HasBulletName(*args, **kwargs)
def HasURL(*args, **kwargs):
"""HasURL(self) -> bool"""
return _richtext.TextAttrEx_HasURL(*args, **kwargs)
def HasPageBreak(*args, **kwargs):
"""HasPageBreak(self) -> bool"""
return _richtext.TextAttrEx_HasPageBreak(*args, **kwargs)
def HasTextEffects(*args, **kwargs):
"""HasTextEffects(self) -> bool"""
return _richtext.TextAttrEx_HasTextEffects(*args, **kwargs)
def HasTextEffect(*args, **kwargs):
"""HasTextEffect(self, int effect) -> bool"""
return _richtext.TextAttrEx_HasTextEffect(*args, **kwargs)
def HasOutlineLevel(*args, **kwargs):
"""HasOutlineLevel(self) -> bool"""
return _richtext.TextAttrEx_HasOutlineLevel(*args, **kwargs)
def IsCharacterStyle(*args, **kwargs):
"""IsCharacterStyle(self) -> bool"""
return _richtext.TextAttrEx_IsCharacterStyle(*args, **kwargs)
def IsParagraphStyle(*args, **kwargs):
"""IsParagraphStyle(self) -> bool"""
return _richtext.TextAttrEx_IsParagraphStyle(*args, **kwargs)
def IsDefault(*args, **kwargs):
"""IsDefault(self) -> bool"""
return _richtext.TextAttrEx_IsDefault(*args, **kwargs)
def CombineEx(*args, **kwargs):
"""CombineEx(TextAttrEx attr, TextAttrEx attrDef, RichTextCtrl text) -> TextAttrEx"""
return _richtext.TextAttrEx_CombineEx(*args, **kwargs)
CombineEx = staticmethod(CombineEx)
Alignment = property(GetAlignment,SetAlignment)
BackgroundColour = property(GetBackgroundColour,SetBackgroundColour)
Flags = property(GetFlags,SetFlags)
Font = property(GetFont,SetFont)
LeftIndent = property(GetLeftIndent,SetLeftIndent)
LeftSubIndent = property(GetLeftSubIndent)
RightIndent = property(GetRightIndent,SetRightIndent)
Tabs = property(GetTabs,SetTabs)
TextColour = property(GetTextColour,SetTextColour)
CharacterStyleName = property(GetCharacterStyleName,SetCharacterStyleName)
ParagraphStyleName = property(GetParagraphStyleName,SetParagraphStyleName)
ListStyleName = property(GetListStyleName,SetListStyleName)
ParagraphSpacingAfter = property(GetParagraphSpacingAfter,SetParagraphSpacingAfter)
ParagraphSpacingBefore = property(GetParagraphSpacingBefore,SetParagraphSpacingBefore)
LineSpacing = property(GetLineSpacing,SetLineSpacing)
BulletStyle = property(GetBulletStyle,SetBulletStyle)
BulletNumber = property(GetBulletNumber,SetBulletNumber)
BulletText = property(GetBulletText,SetBulletText)
BulletName = property(GetBulletName,SetBulletName)
BulletFont = property(GetBulletFont,SetBulletFont)
URL = property(GetURL,SetURL)
TextEffects = property(GetTextEffects,SetTextEffects)
TextEffectFlags = property(GetTextEffectFlags,SetTextEffectFlags)
OutlineLevel = property(GetOutlineLevel,SetOutlineLevel)
_richtext.TextAttrEx_swigregister(TextAttrEx)
cvar = _richtext.cvar
RICHTEXT_ALL = cvar.RICHTEXT_ALL
RICHTEXT_NONE = cvar.RICHTEXT_NONE
def TextAttrEx_CombineEx(*args, **kwargs):
"""TextAttrEx_CombineEx(TextAttrEx attr, TextAttrEx attrDef, RichTextCtrl text) -> TextAttrEx"""
return _richtext.TextAttrEx_CombineEx(*args, **kwargs)
# an alias for compatibility
RichTextAttr = TextAttrEx
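#---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated bindings): filling in a
# TextAttrEx (a.k.a. RichTextAttr) through the properties defined above and
# querying it with the Has*() accessors.  That TextAttrEx() can be constructed
# directly, and that SetURL/SetTextColour also set the matching attribute
# flags, are assumptions based on the wrappers above.
def _example_make_link_style(url):
    """Return a TextAttrEx describing a coloured hyperlink style (sketch)."""
    import wx                       # local import: only needed for wx.BLUE
    attr = TextAttrEx()
    attr.URL = url                  # SetURL; HasURL() should now report True
    attr.TextColour = wx.BLUE       # SetTextColour
    attr.ParagraphSpacingAfter = 10 # SetParagraphSpacingAfter
    assert attr.HasURL()
    return attr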
class RichTextObject(_core.Object):
"""
This is the base class for all drawable objects in a `RichTextCtrl`.
The data displayed in a `RichTextCtrl` is handled by `RichTextBuffer`,
and a `RichTextCtrl` always has one such buffer.
The content is represented by a hierarchy of objects, all derived from
`RichTextObject`. An object might be an image, a fragment of text, a
    paragraph, or a whole buffer. Objects store an attribute object
containing style information; a paragraph object can contain both
paragraph and character information, but content objects such as text
can only store character information. The final style displayed in the
control or in a printout is a combination of base style, paragraph
style and content (character) style.
    The top of the hierarchy is the buffer, a kind of
    `RichTextParagraphLayoutBox`, containing further `RichTextParagraph`
objects, each of which can include text, images and potentially other
types of objects.
Each object maintains a range (start and end position) measured from
the start of the main parent object.
When Layout is called on an object, it is given a size which the
object must limit itself to, or one or more flexible directions
(vertical or horizontal). So, for example, a centred paragraph is
given the page width to play with (minus any margins), but can extend
indefinitely in the vertical direction. The implementation of Layout
caches the calculated size and position.
When the buffer is modified, a range is invalidated (marked as
requiring layout), so that only the minimum amount of layout is
performed.
A paragraph of pure text with the same style contains just one further
object, a `RichTextPlainText` object. When styling is applied to part
of this object, the object is decomposed into separate objects, one
object for each different character style. So each object within a
paragraph always has just one attribute object to denote its character
style. Of course, this can lead to fragmentation after a lot of edit
operations, potentially leading to several objects with the same style
where just one would do. So a Defragment function is called when
updating the control's display, to ensure that the minimum number of
objects is used.
To implement your own RichTextObjects in Python you must derive a
class from `PyRichTextObject`, which has been instrumented to forward
the virtual C++ method calls to the Python methods in the derived
class. (This class hasn't been implemented yet!)
"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self): raise AttributeError, "No constructor defined"
__repr__ = _swig_repr
__swig_destroy__ = _richtext.delete_RichTextObject
__del__ = lambda self : None;
def Draw(*args, **kwargs):
"""
Draw(self, DC dc, RichTextRange range, RichTextRange selectionRange,
Rect rect, int descent, int style) -> bool
"""
return _richtext.RichTextObject_Draw(*args, **kwargs)
def Layout(*args, **kwargs):
"""Layout(self, DC dc, Rect rect, int style) -> bool"""
return _richtext.RichTextObject_Layout(*args, **kwargs)
def HitTest(*args, **kwargs):
"""HitTest(self, DC dc, Point pt, long OUTPUT) -> int"""
return _richtext.RichTextObject_HitTest(*args, **kwargs)
def FindPosition(*args, **kwargs):
"""FindPosition(self, DC dc, long index, Point OUTPUT, int OUTPUT, bool forceLineStart) -> bool"""
return _richtext.RichTextObject_FindPosition(*args, **kwargs)
def GetBestSize(*args, **kwargs):
"""GetBestSize(self) -> Size"""
return _richtext.RichTextObject_GetBestSize(*args, **kwargs)
def GetRangeSize(*args, **kwargs):
"""
GetRangeSize(self, RichTextRange range, Size OUTPUT, int OUTPUT, DC dc,
int flags, Point position=wxPoint(0,0)) -> bool
"""
return _richtext.RichTextObject_GetRangeSize(*args, **kwargs)
def DoSplit(*args, **kwargs):
"""DoSplit(self, long pos) -> RichTextObject"""
return _richtext.RichTextObject_DoSplit(*args, **kwargs)
def CalculateRange(*args, **kwargs):
"""CalculateRange(self, long start, long OUTPUT)"""
return _richtext.RichTextObject_CalculateRange(*args, **kwargs)
def DeleteRange(*args, **kwargs):
"""DeleteRange(self, RichTextRange range) -> bool"""
return _richtext.RichTextObject_DeleteRange(*args, **kwargs)
def IsEmpty(*args, **kwargs):
"""IsEmpty(self) -> bool"""
return _richtext.RichTextObject_IsEmpty(*args, **kwargs)
def GetTextForRange(*args, **kwargs):
"""GetTextForRange(self, RichTextRange range) -> String"""
return _richtext.RichTextObject_GetTextForRange(*args, **kwargs)
def CanMerge(*args, **kwargs):
"""CanMerge(self, RichTextObject object) -> bool"""
return _richtext.RichTextObject_CanMerge(*args, **kwargs)
def Merge(self, obj):
"""Merge(self, RichTextObject object) -> bool"""
val = _richtext.RichTextObject_Merge(self, obj)
if val:
obj.this.own(True)
return val
def Dump(*args, **kwargs):
"""Dump(self) -> String"""
return _richtext.RichTextObject_Dump(*args, **kwargs)
def GetCachedSize(*args, **kwargs):
"""GetCachedSize(self) -> Size"""
return _richtext.RichTextObject_GetCachedSize(*args, **kwargs)
def SetCachedSize(*args, **kwargs):
"""SetCachedSize(self, Size sz)"""
return _richtext.RichTextObject_SetCachedSize(*args, **kwargs)
CachedSize = property(GetCachedSize,SetCachedSize)
def GetPosition(*args, **kwargs):
"""GetPosition(self) -> Point"""
return _richtext.RichTextObject_GetPosition(*args, **kwargs)
def SetPosition(*args, **kwargs):
"""SetPosition(self, Point pos)"""
return _richtext.RichTextObject_SetPosition(*args, **kwargs)
Position = property(GetPosition,SetPosition)
def GetRect(*args, **kwargs):
"""GetRect(self) -> Rect"""
return _richtext.RichTextObject_GetRect(*args, **kwargs)
Rect = property(GetRect)
def SetRange(*args, **kwargs):
"""SetRange(self, RichTextRange range)"""
return _richtext.RichTextObject_SetRange(*args, **kwargs)
def GetRange(*args, **kwargs):
"""GetRange(self) -> RichTextRange"""
return _richtext.RichTextObject_GetRange(*args, **kwargs)
Range = property(GetRange,SetRange)
def GetDirty(*args, **kwargs):
"""GetDirty(self) -> bool"""
return _richtext.RichTextObject_GetDirty(*args, **kwargs)
def SetDirty(*args, **kwargs):
"""SetDirty(self, bool dirty)"""
return _richtext.RichTextObject_SetDirty(*args, **kwargs)
Dirty = property(GetDirty,SetDirty)
def IsComposite(*args, **kwargs):
"""IsComposite(self) -> bool"""
return _richtext.RichTextObject_IsComposite(*args, **kwargs)
def GetParent(*args, **kwargs):
"""GetParent(self) -> RichTextObject"""
return _richtext.RichTextObject_GetParent(*args, **kwargs)
def SetParent(*args, **kwargs):
"""SetParent(self, RichTextObject parent)"""
return _richtext.RichTextObject_SetParent(*args, **kwargs)
Parent = property(GetParent,SetParent)
def SetSameMargins(*args, **kwargs):
"""SetSameMargins(self, int margin)"""
return _richtext.RichTextObject_SetSameMargins(*args, **kwargs)
def SetMargins(*args, **kwargs):
"""SetMargins(self, int leftMargin, int rightMargin, int topMargin, int bottomMargin)"""
return _richtext.RichTextObject_SetMargins(*args, **kwargs)
def GetLeftMargin(*args, **kwargs):
"""GetLeftMargin(self) -> int"""
return _richtext.RichTextObject_GetLeftMargin(*args, **kwargs)
def GetRightMargin(*args, **kwargs):
"""GetRightMargin(self) -> int"""
return _richtext.RichTextObject_GetRightMargin(*args, **kwargs)
def GetTopMargin(*args, **kwargs):
"""GetTopMargin(self) -> int"""
return _richtext.RichTextObject_GetTopMargin(*args, **kwargs)
def GetBottomMargin(*args, **kwargs):
"""GetBottomMargin(self) -> int"""
return _richtext.RichTextObject_GetBottomMargin(*args, **kwargs)
def SetAttributes(*args, **kwargs):
"""SetAttributes(self, TextAttrEx attr)"""
return _richtext.RichTextObject_SetAttributes(*args, **kwargs)
def GetAttributes(*args, **kwargs):
"""GetAttributes(self) -> TextAttrEx"""
return _richtext.RichTextObject_GetAttributes(*args, **kwargs)
Attributes = property(GetAttributes,SetAttributes)
def SetDescent(*args, **kwargs):
"""SetDescent(self, int descent)"""
return _richtext.RichTextObject_SetDescent(*args, **kwargs)
def GetDescent(*args, **kwargs):
"""GetDescent(self) -> int"""
return _richtext.RichTextObject_GetDescent(*args, **kwargs)
Descent = property(GetDescent,SetDescent)
def GetBuffer(*args, **kwargs):
"""GetBuffer(self) -> RichTextBuffer"""
return _richtext.RichTextObject_GetBuffer(*args, **kwargs)
def Clone(*args, **kwargs):
"""Clone(self) -> RichTextObject"""
return _richtext.RichTextObject_Clone(*args, **kwargs)
def Copy(*args, **kwargs):
"""Copy(self, RichTextObject obj)"""
return _richtext.RichTextObject_Copy(*args, **kwargs)
def Reference(*args, **kwargs):
"""Reference(self)"""
return _richtext.RichTextObject_Reference(*args, **kwargs)
def Dereference(*args, **kwargs):
"""Dereference(self)"""
return _richtext.RichTextObject_Dereference(*args, **kwargs)
def ConvertTenthsMMToPixelsDC(*args, **kwargs):
"""ConvertTenthsMMToPixelsDC(self, DC dc, int units) -> int"""
return _richtext.RichTextObject_ConvertTenthsMMToPixelsDC(*args, **kwargs)
def ConvertTenthsMMToPixels(*args, **kwargs):
"""ConvertTenthsMMToPixels(int ppi, int units) -> int"""
return _richtext.RichTextObject_ConvertTenthsMMToPixels(*args, **kwargs)
ConvertTenthsMMToPixels = staticmethod(ConvertTenthsMMToPixels)
_richtext.RichTextObject_swigregister(RichTextObject)
def RichTextObject_ConvertTenthsMMToPixels(*args, **kwargs):
"""RichTextObject_ConvertTenthsMMToPixels(int ppi, int units) -> int"""
return _richtext.RichTextObject_ConvertTenthsMMToPixels(*args, **kwargs)
class RichTextObjectList_iterator(object):
"""This class serves as an iterator for a wxRichTextObjectList object."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self): raise AttributeError, "No constructor defined"
__repr__ = _swig_repr
__swig_destroy__ = _richtext.delete_RichTextObjectList_iterator
__del__ = lambda self : None;
def next(*args, **kwargs):
"""next(self) -> RichTextObject"""
return _richtext.RichTextObjectList_iterator_next(*args, **kwargs)
_richtext.RichTextObjectList_iterator_swigregister(RichTextObjectList_iterator)
class RichTextObjectList(object):
"""
This class wraps a wxList-based class and gives it a Python
sequence-like interface. Sequence operations supported are length,
index access and iteration.
"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self): raise AttributeError, "No constructor defined"
__repr__ = _swig_repr
__swig_destroy__ = _richtext.delete_RichTextObjectList
__del__ = lambda self : None;
def __len__(*args, **kwargs):
"""__len__(self) -> size_t"""
return _richtext.RichTextObjectList___len__(*args, **kwargs)
def __getitem__(*args, **kwargs):
"""__getitem__(self, size_t index) -> RichTextObject"""
return _richtext.RichTextObjectList___getitem__(*args, **kwargs)
def __contains__(*args, **kwargs):
"""__contains__(self, RichTextObject obj) -> bool"""
return _richtext.RichTextObjectList___contains__(*args, **kwargs)
def __iter__(*args, **kwargs):
"""__iter__(self) -> RichTextObjectList_iterator"""
return _richtext.RichTextObjectList___iter__(*args, **kwargs)
def index(*args, **kwargs):
"""index(self, RichTextObject obj) -> int"""
return _richtext.RichTextObjectList_index(*args, **kwargs)
def __repr__(self):
return "wxRichTextObjectList: " + repr(list(self))
_richtext.RichTextObjectList_swigregister(RichTextObjectList)
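#---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated bindings): the wrapped
# RichTextObjectList behaves like a read-only Python sequence, so the usual
# len()/indexing/iteration idioms apply.  The helper below relies only on the
# __len__, __getitem__, __iter__ and index wrappers defined above.
def _example_summarize_children(children):
    """Return a list of class names, one per object in a RichTextObjectList."""
    names = [obj.__class__.__name__ for obj in children]   # __iter__
    assert len(names) == len(children)                      # __len__
    if children:                                            # truthiness via __len__
        first = children[0]                                 # __getitem__
        assert children.index(first) == 0
    return names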
class RichTextCompositeObject(RichTextObject):
"""Objects of this class can contain other rich text objects."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self): raise AttributeError, "No constructor defined"
__repr__ = _swig_repr
__swig_destroy__ = _richtext.delete_RichTextCompositeObject
__del__ = lambda self : None;
def GetChildren(*args, **kwargs):
"""GetChildren(self) -> RichTextObjectList"""
return _richtext.RichTextCompositeObject_GetChildren(*args, **kwargs)
def GetChildCount(*args, **kwargs):
"""GetChildCount(self) -> size_t"""
return _richtext.RichTextCompositeObject_GetChildCount(*args, **kwargs)
def GetChild(*args, **kwargs):
"""GetChild(self, size_t n) -> RichTextObject"""
return _richtext.RichTextCompositeObject_GetChild(*args, **kwargs)
def Copy(*args, **kwargs):
"""Copy(self, RichTextCompositeObject obj)"""
return _richtext.RichTextCompositeObject_Copy(*args, **kwargs)
def AppendChild(*args, **kwargs):
"""AppendChild(self, RichTextObject child) -> size_t"""
return _richtext.RichTextCompositeObject_AppendChild(*args, **kwargs)
def InsertChild(*args, **kwargs):
"""InsertChild(self, RichTextObject child, RichTextObject inFrontOf) -> bool"""
return _richtext.RichTextCompositeObject_InsertChild(*args, **kwargs)
    def RemoveChild(self, child, deleteChild=False):
        """RemoveChild(self, RichTextObject child, bool deleteChild=False) -> bool"""
val = _richtext.RichTextCompositeObject_RemoveChild(self, child, deleteChild)
self.this.own(not deleteChild)
return val
def DeleteChildren(*args, **kwargs):
"""DeleteChildren(self) -> bool"""
return _richtext.RichTextCompositeObject_DeleteChildren(*args, **kwargs)
def Defragment(*args, **kwargs):
"""Defragment(self) -> bool"""
return _richtext.RichTextCompositeObject_Defragment(*args, **kwargs)
_richtext.RichTextCompositeObject_swigregister(RichTextCompositeObject)
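#---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated bindings): walking the object
# hierarchy described in the RichTextObject docstring.  Composite objects
# (RichTextCompositeObject and its subclasses) expose their children through
# GetChildCount()/GetChild(); leaf objects such as plain text fragments do not.
def _example_dump_tree(obj, indent=0):
    """Print one line per object in the hierarchy rooted at obj (sketch)."""
    print " " * indent + obj.__class__.__name__
    if obj.IsComposite():
        for i in range(obj.GetChildCount()):
            _example_dump_tree(obj.GetChild(i), indent + 2)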
class RichTextBox(RichTextCompositeObject):
"""This defines a 2D space to lay out objects."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
"""
__init__(self, RichTextObject parent=None) -> RichTextBox
This defines a 2D space to lay out objects.
"""
_richtext.RichTextBox_swiginit(self,_richtext.new_RichTextBox(*args, **kwargs))
def Copy(*args, **kwargs):
"""Copy(self, RichTextBox obj)"""
return _richtext.RichTextBox_Copy(*args, **kwargs)
_richtext.RichTextBox_swigregister(RichTextBox)
class RichTextParagraphLayoutBox(RichTextBox):
"""Proxy of C++ RichTextParagraphLayoutBox class"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
"""__init__(self, RichTextObject parent=None) -> RichTextParagraphLayoutBox"""
_richtext.RichTextParagraphLayoutBox_swiginit(self,_richtext.new_RichTextParagraphLayoutBox(*args, **kwargs))
def SetRichTextCtrl(*args, **kwargs):
"""SetRichTextCtrl(self, RichTextCtrl ctrl)"""
return _richtext.RichTextParagraphLayoutBox_SetRichTextCtrl(*args, **kwargs)
def GetRichTextCtrl(*args, **kwargs):
"""GetRichTextCtrl(self) -> RichTextCtrl"""
return _richtext.RichTextParagraphLayoutBox_GetRichTextCtrl(*args, **kwargs)
def SetPartialParagraph(*args, **kwargs):
"""SetPartialParagraph(self, bool partialPara)"""
return _richtext.RichTextParagraphLayoutBox_SetPartialParagraph(*args, **kwargs)
def GetPartialParagraph(*args, **kwargs):
"""GetPartialParagraph(self) -> bool"""
return _richtext.RichTextParagraphLayoutBox_GetPartialParagraph(*args, **kwargs)
def GetStyleSheet(*args, **kwargs):
"""GetStyleSheet(self) -> wxRichTextStyleSheet"""
return _richtext.RichTextParagraphLayoutBox_GetStyleSheet(*args, **kwargs)
def Init(*args, **kwargs):
"""Init(self)"""
return _richtext.RichTextParagraphLayoutBox_Init(*args, **kwargs)
def Clear(*args, **kwargs):
"""Clear(self)"""
return _richtext.RichTextParagraphLayoutBox_Clear(*args, **kwargs)
def Reset(*args, **kwargs):
"""Reset(self)"""
return _richtext.RichTextParagraphLayoutBox_Reset(*args, **kwargs)
def AddParagraph(*args, **kwargs):
"""AddParagraph(self, String text, TextAttrEx paraStyle=None) -> RichTextRange"""
return _richtext.RichTextParagraphLayoutBox_AddParagraph(*args, **kwargs)
def AddImage(*args, **kwargs):
"""AddImage(self, Image image, TextAttrEx paraStyle=None) -> RichTextRange"""
return _richtext.RichTextParagraphLayoutBox_AddImage(*args, **kwargs)
def AddParagraphs(*args, **kwargs):
"""AddParagraphs(self, String text, TextAttrEx paraStyle=None) -> RichTextRange"""
return _richtext.RichTextParagraphLayoutBox_AddParagraphs(*args, **kwargs)
def GetLineAtPosition(*args, **kwargs):
"""GetLineAtPosition(self, long pos, bool caretPosition=False) -> RichTextLine"""
return _richtext.RichTextParagraphLayoutBox_GetLineAtPosition(*args, **kwargs)
def GetLineAtYPosition(*args, **kwargs):
"""GetLineAtYPosition(self, int y) -> RichTextLine"""
return _richtext.RichTextParagraphLayoutBox_GetLineAtYPosition(*args, **kwargs)
def GetParagraphAtPosition(*args, **kwargs):
"""GetParagraphAtPosition(self, long pos, bool caretPosition=False) -> RichTextParagraph"""
return _richtext.RichTextParagraphLayoutBox_GetParagraphAtPosition(*args, **kwargs)
def GetLineSizeAtPosition(*args, **kwargs):
"""GetLineSizeAtPosition(self, long pos, bool caretPosition=False) -> Size"""
return _richtext.RichTextParagraphLayoutBox_GetLineSizeAtPosition(*args, **kwargs)
def GetVisibleLineNumber(*args, **kwargs):
"""GetVisibleLineNumber(self, long pos, bool caretPosition=False, bool startOfLine=False) -> long"""
return _richtext.RichTextParagraphLayoutBox_GetVisibleLineNumber(*args, **kwargs)
def GetLineForVisibleLineNumber(*args, **kwargs):
"""GetLineForVisibleLineNumber(self, long lineNumber) -> RichTextLine"""
return _richtext.RichTextParagraphLayoutBox_GetLineForVisibleLineNumber(*args, **kwargs)
def GetLeafObjectAtPosition(*args, **kwargs):
"""GetLeafObjectAtPosition(self, long position) -> RichTextObject"""
return _richtext.RichTextParagraphLayoutBox_GetLeafObjectAtPosition(*args, **kwargs)
def GetParagraphAtLine(*args, **kwargs):
"""GetParagraphAtLine(self, long paragraphNumber) -> RichTextParagraph"""
return _richtext.RichTextParagraphLayoutBox_GetParagraphAtLine(*args, **kwargs)
def GetParagraphForLine(*args, **kwargs):
"""GetParagraphForLine(self, RichTextLine line) -> RichTextParagraph"""
return _richtext.RichTextParagraphLayoutBox_GetParagraphForLine(*args, **kwargs)
def GetParagraphLength(*args, **kwargs):
"""GetParagraphLength(self, long paragraphNumber) -> int"""
return _richtext.RichTextParagraphLayoutBox_GetParagraphLength(*args, **kwargs)
def GetParagraphCount(*args, **kwargs):
"""GetParagraphCount(self) -> int"""
return _richtext.RichTextParagraphLayoutBox_GetParagraphCount(*args, **kwargs)
def GetLineCount(*args, **kwargs):
"""GetLineCount(self) -> int"""
return _richtext.RichTextParagraphLayoutBox_GetLineCount(*args, **kwargs)
def GetParagraphText(*args, **kwargs):
"""GetParagraphText(self, long paragraphNumber) -> String"""
return _richtext.RichTextParagraphLayoutBox_GetParagraphText(*args, **kwargs)
def XYToPosition(*args, **kwargs):
"""XYToPosition(self, long x, long y) -> long"""
return _richtext.RichTextParagraphLayoutBox_XYToPosition(*args, **kwargs)
def PositionToXY(*args, **kwargs):
"""PositionToXY(self, long pos, long x, long y) -> bool"""
return _richtext.RichTextParagraphLayoutBox_PositionToXY(*args, **kwargs)
def SetStyle(*args, **kwargs):
"""SetStyle(self, RichTextRange range, TextAttrEx style, int flags=RICHTEXT_SETSTYLE_WITH_UNDO) -> bool"""
return _richtext.RichTextParagraphLayoutBox_SetStyle(*args, **kwargs)
def GetStyle(*args, **kwargs):
"""GetStyle(self, long position, TextAttrEx style) -> bool"""
return _richtext.RichTextParagraphLayoutBox_GetStyle(*args, **kwargs)
def GetUncombinedStyle(*args, **kwargs):
"""GetUncombinedStyle(self, long position, TextAttrEx style) -> bool"""
return _richtext.RichTextParagraphLayoutBox_GetUncombinedStyle(*args, **kwargs)
def GetStyleForRange(*args, **kwargs):
"""GetStyleForRange(self, RichTextRange range, TextAttrEx style) -> bool"""
return _richtext.RichTextParagraphLayoutBox_GetStyleForRange(*args, **kwargs)
def CollectStyle(*args, **kwargs):
"""
CollectStyle(self, TextAttrEx currentStyle, TextAttrEx style, long multipleStyleAttributes,
int multipleTextEffectAttributes) -> bool
"""
return _richtext.RichTextParagraphLayoutBox_CollectStyle(*args, **kwargs)
def SetListStyle(*args, **kwargs):
"""
SetListStyle(self, RichTextRange range, String defName, int flags=RICHTEXT_SETSTYLE_WITH_UNDO,
int startFrom=1, int specifiedLevel=-1) -> bool
"""
return _richtext.RichTextParagraphLayoutBox_SetListStyle(*args, **kwargs)
def ClearListStyle(*args, **kwargs):
"""ClearListStyle(self, RichTextRange range, int flags=RICHTEXT_SETSTYLE_WITH_UNDO) -> bool"""
return _richtext.RichTextParagraphLayoutBox_ClearListStyle(*args, **kwargs)
def NumberList(*args, **kwargs):
"""
NumberList(self, RichTextRange range, String defName, int flags=RICHTEXT_SETSTYLE_WITH_UNDO,
int startFrom=1, int specifiedLevel=-1) -> bool
"""
return _richtext.RichTextParagraphLayoutBox_NumberList(*args, **kwargs)
def PromoteList(*args, **kwargs):
"""
PromoteList(self, int promoteBy, RichTextRange range, String defName,
int flags=RICHTEXT_SETSTYLE_WITH_UNDO, int specifiedLevel=-1) -> bool
"""
return _richtext.RichTextParagraphLayoutBox_PromoteList(*args, **kwargs)
def DoNumberList(*args, **kwargs):
"""
DoNumberList(self, RichTextRange range, RichTextRange promotionRange,
int promoteBy, wxRichTextListStyleDefinition def,
int flags=RICHTEXT_SETSTYLE_WITH_UNDO, int startFrom=1,
int specifiedLevel=-1) -> bool
"""
return _richtext.RichTextParagraphLayoutBox_DoNumberList(*args, **kwargs)
def FindNextParagraphNumber(*args, **kwargs):
"""FindNextParagraphNumber(self, RichTextParagraph previousParagraph, TextAttrEx attr) -> bool"""
return _richtext.RichTextParagraphLayoutBox_FindNextParagraphNumber(*args, **kwargs)
def HasCharacterAttributes(*args, **kwargs):
"""HasCharacterAttributes(self, RichTextRange range, TextAttrEx style) -> bool"""
return _richtext.RichTextParagraphLayoutBox_HasCharacterAttributes(*args, **kwargs)
def HasParagraphAttributes(*args, **kwargs):
"""HasParagraphAttributes(self, RichTextRange range, TextAttrEx style) -> bool"""
return _richtext.RichTextParagraphLayoutBox_HasParagraphAttributes(*args, **kwargs)
def InsertFragment(*args, **kwargs):
"""InsertFragment(self, long position, RichTextParagraphLayoutBox fragment) -> bool"""
return _richtext.RichTextParagraphLayoutBox_InsertFragment(*args, **kwargs)
def CopyFragment(*args, **kwargs):
"""CopyFragment(self, RichTextRange range, RichTextParagraphLayoutBox fragment) -> bool"""
return _richtext.RichTextParagraphLayoutBox_CopyFragment(*args, **kwargs)
def ApplyStyleSheet(*args, **kwargs):
"""ApplyStyleSheet(self, wxRichTextStyleSheet styleSheet) -> bool"""
return _richtext.RichTextParagraphLayoutBox_ApplyStyleSheet(*args, **kwargs)
def Copy(*args, **kwargs):
"""Copy(self, RichTextParagraphLayoutBox obj)"""
return _richtext.RichTextParagraphLayoutBox_Copy(*args, **kwargs)
def UpdateRanges(*args, **kwargs):
"""UpdateRanges(self)"""
return _richtext.RichTextParagraphLayoutBox_UpdateRanges(*args, **kwargs)
def GetText(*args, **kwargs):
"""GetText(self) -> String"""
return _richtext.RichTextParagraphLayoutBox_GetText(*args, **kwargs)
def SetDefaultStyle(*args, **kwargs):
"""SetDefaultStyle(self, TextAttrEx style) -> bool"""
return _richtext.RichTextParagraphLayoutBox_SetDefaultStyle(*args, **kwargs)
def GetDefaultStyle(*args, **kwargs):
"""GetDefaultStyle(self) -> TextAttrEx"""
return _richtext.RichTextParagraphLayoutBox_GetDefaultStyle(*args, **kwargs)
def SetBasicStyle(*args, **kwargs):
"""SetBasicStyle(self, TextAttrEx style)"""
return _richtext.RichTextParagraphLayoutBox_SetBasicStyle(*args, **kwargs)
def GetBasicStyle(*args, **kwargs):
"""GetBasicStyle(self) -> TextAttrEx"""
return _richtext.RichTextParagraphLayoutBox_GetBasicStyle(*args, **kwargs)
def Invalidate(*args, **kwargs):
"""Invalidate(self, RichTextRange invalidRange=wxRICHTEXT_ALL)"""
return _richtext.RichTextParagraphLayoutBox_Invalidate(*args, **kwargs)
def GetInvalidRange(*args, **kwargs):
"""GetInvalidRange(self, bool wholeParagraphs=False) -> RichTextRange"""
return _richtext.RichTextParagraphLayoutBox_GetInvalidRange(*args, **kwargs)
_richtext.RichTextParagraphLayoutBox_swigregister(RichTextParagraphLayoutBox)
class RichTextLine(object):
"""
This object represents a line in a paragraph, and stores offsets from
the start of the paragraph representing the start and end positions of
the line.
"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
"""
__init__(self, RichTextParagraph parent) -> RichTextLine
This object represents a line in a paragraph, and stores offsets from
the start of the paragraph representing the start and end positions of
the line.
"""
_richtext.RichTextLine_swiginit(self,_richtext.new_RichTextLine(*args, **kwargs))
__swig_destroy__ = _richtext.delete_RichTextLine
__del__ = lambda self : None;
def SetRange(*args, **kwargs):
"""SetRange(self, RichTextRange range)"""
return _richtext.RichTextLine_SetRange(*args, **kwargs)
def GetParent(*args, **kwargs):
"""GetParent(self) -> RichTextParagraph"""
return _richtext.RichTextLine_GetParent(*args, **kwargs)
def GetRange(*args, **kwargs):
"""GetRange(self) -> RichTextRange"""
return _richtext.RichTextLine_GetRange(*args, **kwargs)
def GetAbsoluteRange(*args, **kwargs):
"""GetAbsoluteRange(self) -> RichTextRange"""
return _richtext.RichTextLine_GetAbsoluteRange(*args, **kwargs)
def GetSize(*args, **kwargs):
"""GetSize(self) -> Size"""
return _richtext.RichTextLine_GetSize(*args, **kwargs)
def SetSize(*args, **kwargs):
"""SetSize(self, Size sz)"""
return _richtext.RichTextLine_SetSize(*args, **kwargs)
def GetPosition(*args, **kwargs):
"""GetPosition(self) -> Point"""
return _richtext.RichTextLine_GetPosition(*args, **kwargs)
def SetPosition(*args, **kwargs):
"""SetPosition(self, Point pos)"""
return _richtext.RichTextLine_SetPosition(*args, **kwargs)
def GetAbsolutePosition(*args, **kwargs):
"""GetAbsolutePosition(self) -> Point"""
return _richtext.RichTextLine_GetAbsolutePosition(*args, **kwargs)
def GetRect(*args, **kwargs):
"""GetRect(self) -> Rect"""
return _richtext.RichTextLine_GetRect(*args, **kwargs)
def SetDescent(*args, **kwargs):
"""SetDescent(self, int descent)"""
return _richtext.RichTextLine_SetDescent(*args, **kwargs)
def GetDescent(*args, **kwargs):
"""GetDescent(self) -> int"""
return _richtext.RichTextLine_GetDescent(*args, **kwargs)
def Init(*args, **kwargs):
"""Init(self, RichTextParagraph parent)"""
return _richtext.RichTextLine_Init(*args, **kwargs)
def Copy(*args, **kwargs):
"""Copy(self, RichTextLine obj)"""
return _richtext.RichTextLine_Copy(*args, **kwargs)
def Clone(*args, **kwargs):
"""Clone(self) -> RichTextLine"""
return _richtext.RichTextLine_Clone(*args, **kwargs)
_richtext.RichTextLine_swigregister(RichTextLine)
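#---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated bindings): a RichTextLine
# stores its range relative to the owning paragraph, so GetRange() and
# GetAbsoluteRange() differ by the paragraph's starting position.  The helper
# simply returns both forms side by side for inspection.
def _example_line_ranges(line):
    """Return (relative_range, absolute_range) for a RichTextLine (sketch)."""
    return line.GetRange(), line.GetAbsoluteRange()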
class RichTextParagraph(RichTextBox):
"""
This object represents a single paragraph (or in a straight text
editor, a line).
"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
"""
__init__(self, String text, RichTextObject parent=None, TextAttrEx paraStyle=None,
TextAttrEx charStyle=None) -> RichTextParagraph
This object represents a single paragraph (or in a straight text
editor, a line).
"""
_richtext.RichTextParagraph_swiginit(self,_richtext.new_RichTextParagraph(*args, **kwargs))
__swig_destroy__ = _richtext.delete_RichTextParagraph
__del__ = lambda self : None;
def GetLines(*args, **kwargs):
"""GetLines(self) -> wxRichTextLineList"""
return _richtext.RichTextParagraph_GetLines(*args, **kwargs)
def Copy(*args, **kwargs):
"""Copy(self, RichTextParagraph obj)"""
return _richtext.RichTextParagraph_Copy(*args, **kwargs)
def ClearLines(*args, **kwargs):
"""ClearLines(self)"""
return _richtext.RichTextParagraph_ClearLines(*args, **kwargs)
def ApplyParagraphStyle(*args, **kwargs):
"""ApplyParagraphStyle(self, TextAttrEx attr, Rect rect)"""
return _richtext.RichTextParagraph_ApplyParagraphStyle(*args, **kwargs)
def InsertText(*args, **kwargs):
"""InsertText(self, long pos, String text) -> bool"""
return _richtext.RichTextParagraph_InsertText(*args, **kwargs)
def SplitAt(*args, **kwargs):
"""SplitAt(self, long pos, RichTextObject previousObject=None) -> RichTextObject"""
return _richtext.RichTextParagraph_SplitAt(*args, **kwargs)
def MoveToList(*args, **kwargs):
"""MoveToList(self, RichTextObject obj, wxList list)"""
return _richtext.RichTextParagraph_MoveToList(*args, **kwargs)
def MoveFromList(*args, **kwargs):
"""MoveFromList(self, wxList list)"""
return _richtext.RichTextParagraph_MoveFromList(*args, **kwargs)
def GetContiguousPlainText(*args, **kwargs):
"""GetContiguousPlainText(self, String text, RichTextRange range, bool fromStart=True) -> bool"""
return _richtext.RichTextParagraph_GetContiguousPlainText(*args, **kwargs)
def FindWrapPosition(*args, **kwargs):
"""FindWrapPosition(self, RichTextRange range, DC dc, int availableSpace, long wrapPosition) -> bool"""
return _richtext.RichTextParagraph_FindWrapPosition(*args, **kwargs)
def FindObjectAtPosition(*args, **kwargs):
"""FindObjectAtPosition(self, long position) -> RichTextObject"""
return _richtext.RichTextParagraph_FindObjectAtPosition(*args, **kwargs)
def GetBulletText(*args, **kwargs):
"""GetBulletText(self) -> String"""
return _richtext.RichTextParagraph_GetBulletText(*args, **kwargs)
def AllocateLine(*args, **kwargs):
"""AllocateLine(self, int pos) -> RichTextLine"""
return _richtext.RichTextParagraph_AllocateLine(*args, **kwargs)
def ClearUnusedLines(*args, **kwargs):
"""ClearUnusedLines(self, int lineCount) -> bool"""
return _richtext.RichTextParagraph_ClearUnusedLines(*args, **kwargs)
def GetCombinedAttributes(*args, **kwargs):
"""GetCombinedAttributes(self, TextAttrEx contentStyle=None) -> TextAttrEx"""
return _richtext.RichTextParagraph_GetCombinedAttributes(*args, **kwargs)
def GetFirstLineBreakPosition(*args, **kwargs):
"""GetFirstLineBreakPosition(self, long pos) -> long"""
return _richtext.RichTextParagraph_GetFirstLineBreakPosition(*args, **kwargs)
def InitDefaultTabs(*args, **kwargs):
"""InitDefaultTabs()"""
return _richtext.RichTextParagraph_InitDefaultTabs(*args, **kwargs)
InitDefaultTabs = staticmethod(InitDefaultTabs)
def ClearDefaultTabs(*args, **kwargs):
"""ClearDefaultTabs()"""
return _richtext.RichTextParagraph_ClearDefaultTabs(*args, **kwargs)
ClearDefaultTabs = staticmethod(ClearDefaultTabs)
def GetDefaultTabs(*args, **kwargs):
"""GetDefaultTabs() -> wxArrayInt"""
return _richtext.RichTextParagraph_GetDefaultTabs(*args, **kwargs)
GetDefaultTabs = staticmethod(GetDefaultTabs)
_richtext.RichTextParagraph_swigregister(RichTextParagraph)
def RichTextParagraph_InitDefaultTabs(*args):
"""RichTextParagraph_InitDefaultTabs()"""
return _richtext.RichTextParagraph_InitDefaultTabs(*args)
def RichTextParagraph_ClearDefaultTabs(*args):
"""RichTextParagraph_ClearDefaultTabs()"""
return _richtext.RichTextParagraph_ClearDefaultTabs(*args)
def RichTextParagraph_GetDefaultTabs(*args):
"""RichTextParagraph_GetDefaultTabs() -> wxArrayInt"""
return _richtext.RichTextParagraph_GetDefaultTabs(*args)
class RichTextPlainText(RichTextObject):
"""This object represents a single piece of text."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
"""
__init__(self, String text=wxEmptyString, RichTextObject parent=None,
TextAttrEx style=None) -> RichTextPlainText
This object represents a single piece of text.
"""
_richtext.RichTextPlainText_swiginit(self,_richtext.new_RichTextPlainText(*args, **kwargs))
def GetFirstLineBreakPosition(*args, **kwargs):
"""GetFirstLineBreakPosition(self, long pos) -> long"""
return _richtext.RichTextPlainText_GetFirstLineBreakPosition(*args, **kwargs)
def GetText(*args, **kwargs):
"""GetText(self) -> String"""
return _richtext.RichTextPlainText_GetText(*args, **kwargs)
def SetText(*args, **kwargs):
"""SetText(self, String text)"""
return _richtext.RichTextPlainText_SetText(*args, **kwargs)
def Copy(*args, **kwargs):
"""Copy(self, RichTextPlainText obj)"""
return _richtext.RichTextPlainText_Copy(*args, **kwargs)
_richtext.RichTextPlainText_swigregister(RichTextPlainText)
class RichTextImage(RichTextObject):
"""This object represents an image."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
"""
__init__(self, RichTextObject parent=None) -> RichTextImage
This object represents an image.
"""
_richtext.RichTextImage_swiginit(self,_richtext.new_RichTextImage(*args, **kwargs))
def GetImage(*args, **kwargs):
"""GetImage(self) -> Image"""
return _richtext.RichTextImage_GetImage(*args, **kwargs)
def SetImage(*args, **kwargs):
"""SetImage(self, Image image)"""
return _richtext.RichTextImage_SetImage(*args, **kwargs)
def GetImageBlock(*args, **kwargs):
"""GetImageBlock(self) -> wxRichTextImageBlock"""
return _richtext.RichTextImage_GetImageBlock(*args, **kwargs)
def Copy(*args, **kwargs):
"""Copy(self, RichTextImage obj)"""
return _richtext.RichTextImage_Copy(*args, **kwargs)
def LoadFromBlock(*args, **kwargs):
"""LoadFromBlock(self) -> bool"""
return _richtext.RichTextImage_LoadFromBlock(*args, **kwargs)
def MakeBlock(*args, **kwargs):
"""MakeBlock(self) -> bool"""
return _richtext.RichTextImage_MakeBlock(*args, **kwargs)
_richtext.RichTextImage_swigregister(RichTextImage)
class RichTextFileHandlerList_iterator(object):
"""This class serves as an iterator for a wxRichTextFileHandlerList object."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self): raise AttributeError, "No constructor defined"
__repr__ = _swig_repr
__swig_destroy__ = _richtext.delete_RichTextFileHandlerList_iterator
__del__ = lambda self : None;
def next(*args, **kwargs):
"""next(self) -> RichTextFileHandler"""
return _richtext.RichTextFileHandlerList_iterator_next(*args, **kwargs)
_richtext.RichTextFileHandlerList_iterator_swigregister(RichTextFileHandlerList_iterator)
class RichTextFileHandlerList(object):
"""
This class wraps a wxList-based class and gives it a Python
sequence-like interface. Sequence operations supported are length,
index access and iteration.
"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self): raise AttributeError, "No constructor defined"
__repr__ = _swig_repr
__swig_destroy__ = _richtext.delete_RichTextFileHandlerList
__del__ = lambda self : None;
def __len__(*args, **kwargs):
"""__len__(self) -> size_t"""
return _richtext.RichTextFileHandlerList___len__(*args, **kwargs)
def __getitem__(*args, **kwargs):
"""__getitem__(self, size_t index) -> RichTextFileHandler"""
return _richtext.RichTextFileHandlerList___getitem__(*args, **kwargs)
def __contains__(*args, **kwargs):
"""__contains__(self, RichTextFileHandler obj) -> bool"""
return _richtext.RichTextFileHandlerList___contains__(*args, **kwargs)
def __iter__(*args, **kwargs):
"""__iter__(self) -> RichTextFileHandlerList_iterator"""
return _richtext.RichTextFileHandlerList___iter__(*args, **kwargs)
def __repr__(self):
return "wxRichTextFileHandlerList: " + repr(list(self))
_richtext.RichTextFileHandlerList_swigregister(RichTextFileHandlerList)
class RichTextBuffer(RichTextParagraphLayoutBox):
"""This is a kind of box, used to represent the whole buffer."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
"""
__init__(self) -> RichTextBuffer
This is a kind of box, used to represent the whole buffer.
"""
_richtext.RichTextBuffer_swiginit(self,_richtext.new_RichTextBuffer(*args, **kwargs))
__swig_destroy__ = _richtext.delete_RichTextBuffer
__del__ = lambda self : None;
def GetCommandProcessor(*args, **kwargs):
"""GetCommandProcessor(self) -> wxCommandProcessor"""
return _richtext.RichTextBuffer_GetCommandProcessor(*args, **kwargs)
def SetStyleSheet(*args, **kwargs):
"""SetStyleSheet(self, wxRichTextStyleSheet styleSheet)"""
return _richtext.RichTextBuffer_SetStyleSheet(*args, **kwargs)
def SetStyleSheetAndNotify(*args, **kwargs):
"""SetStyleSheetAndNotify(self, wxRichTextStyleSheet sheet) -> bool"""
return _richtext.RichTextBuffer_SetStyleSheetAndNotify(*args, **kwargs)
def PushStyleSheet(*args, **kwargs):
"""PushStyleSheet(self, wxRichTextStyleSheet styleSheet) -> bool"""
return _richtext.RichTextBuffer_PushStyleSheet(*args, **kwargs)
def PopStyleSheet(*args, **kwargs):
"""PopStyleSheet(self) -> wxRichTextStyleSheet"""
return _richtext.RichTextBuffer_PopStyleSheet(*args, **kwargs)
def Init(*args, **kwargs):
"""Init(self)"""
return _richtext.RichTextBuffer_Init(*args, **kwargs)
def ResetAndClearCommands(*args, **kwargs):
"""ResetAndClearCommands(self)"""
return _richtext.RichTextBuffer_ResetAndClearCommands(*args, **kwargs)
def LoadFile(*args, **kwargs):
"""LoadFile(self, String filename, int type=RICHTEXT_TYPE_ANY) -> bool"""
return _richtext.RichTextBuffer_LoadFile(*args, **kwargs)
def SaveFile(*args, **kwargs):
"""SaveFile(self, String filename, int type=RICHTEXT_TYPE_ANY) -> bool"""
return _richtext.RichTextBuffer_SaveFile(*args, **kwargs)
def LoadStream(*args, **kwargs):
"""LoadStream(self, InputStream stream, int type=RICHTEXT_TYPE_ANY) -> bool"""
return _richtext.RichTextBuffer_LoadStream(*args, **kwargs)
def SaveStream(*args, **kwargs):
"""SaveStream(self, wxOutputStream stream, int type=RICHTEXT_TYPE_ANY) -> bool"""
return _richtext.RichTextBuffer_SaveStream(*args, **kwargs)
def SetHandlerFlags(*args, **kwargs):
"""SetHandlerFlags(self, int flags)"""
return _richtext.RichTextBuffer_SetHandlerFlags(*args, **kwargs)
def GetHandlerFlags(*args, **kwargs):
"""GetHandlerFlags(self) -> int"""
return _richtext.RichTextBuffer_GetHandlerFlags(*args, **kwargs)
def BeginBatchUndo(*args, **kwargs):
"""BeginBatchUndo(self, String cmdName) -> bool"""
return _richtext.RichTextBuffer_BeginBatchUndo(*args, **kwargs)
def EndBatchUndo(*args, **kwargs):
"""EndBatchUndo(self) -> bool"""
return _richtext.RichTextBuffer_EndBatchUndo(*args, **kwargs)
def BatchingUndo(*args, **kwargs):
"""BatchingUndo(self) -> bool"""
return _richtext.RichTextBuffer_BatchingUndo(*args, **kwargs)
def SubmitAction(*args, **kwargs):
"""SubmitAction(self, RichTextAction action) -> bool"""
return _richtext.RichTextBuffer_SubmitAction(*args, **kwargs)
def GetBatchedCommand(*args, **kwargs):
"""GetBatchedCommand(self) -> RichTextCommand"""
return _richtext.RichTextBuffer_GetBatchedCommand(*args, **kwargs)
def BeginSuppressUndo(*args, **kwargs):
"""BeginSuppressUndo(self) -> bool"""
return _richtext.RichTextBuffer_BeginSuppressUndo(*args, **kwargs)
def EndSuppressUndo(*args, **kwargs):
"""EndSuppressUndo(self) -> bool"""
return _richtext.RichTextBuffer_EndSuppressUndo(*args, **kwargs)
def SuppressingUndo(*args, **kwargs):
"""SuppressingUndo(self) -> bool"""
return _richtext.RichTextBuffer_SuppressingUndo(*args, **kwargs)
def CopyToClipboard(*args, **kwargs):
"""CopyToClipboard(self, RichTextRange range) -> bool"""
return _richtext.RichTextBuffer_CopyToClipboard(*args, **kwargs)
def PasteFromClipboard(*args, **kwargs):
"""PasteFromClipboard(self, long position) -> bool"""
return _richtext.RichTextBuffer_PasteFromClipboard(*args, **kwargs)
def CanPasteFromClipboard(*args, **kwargs):
"""CanPasteFromClipboard(self) -> bool"""
return _richtext.RichTextBuffer_CanPasteFromClipboard(*args, **kwargs)
def BeginStyle(*args, **kwargs):
"""BeginStyle(self, TextAttrEx style) -> bool"""
return _richtext.RichTextBuffer_BeginStyle(*args, **kwargs)
def EndStyle(*args, **kwargs):
"""EndStyle(self) -> bool"""
return _richtext.RichTextBuffer_EndStyle(*args, **kwargs)
def EndAllStyles(*args, **kwargs):
"""EndAllStyles(self) -> bool"""
return _richtext.RichTextBuffer_EndAllStyles(*args, **kwargs)
def ClearStyleStack(*args, **kwargs):
"""ClearStyleStack(self)"""
return _richtext.RichTextBuffer_ClearStyleStack(*args, **kwargs)
def GetStyleStackSize(*args, **kwargs):
"""GetStyleStackSize(self) -> size_t"""
return _richtext.RichTextBuffer_GetStyleStackSize(*args, **kwargs)
def BeginBold(*args, **kwargs):
"""BeginBold(self) -> bool"""
return _richtext.RichTextBuffer_BeginBold(*args, **kwargs)
def EndBold(*args, **kwargs):
"""EndBold(self) -> bool"""
return _richtext.RichTextBuffer_EndBold(*args, **kwargs)
def BeginItalic(*args, **kwargs):
"""BeginItalic(self) -> bool"""
return _richtext.RichTextBuffer_BeginItalic(*args, **kwargs)
def EndItalic(*args, **kwargs):
"""EndItalic(self) -> bool"""
return _richtext.RichTextBuffer_EndItalic(*args, **kwargs)
def BeginUnderline(*args, **kwargs):
"""BeginUnderline(self) -> bool"""
return _richtext.RichTextBuffer_BeginUnderline(*args, **kwargs)
def EndUnderline(*args, **kwargs):
"""EndUnderline(self) -> bool"""
return _richtext.RichTextBuffer_EndUnderline(*args, **kwargs)
def BeginFontSize(*args, **kwargs):
"""BeginFontSize(self, int pointSize) -> bool"""
return _richtext.RichTextBuffer_BeginFontSize(*args, **kwargs)
def EndFontSize(*args, **kwargs):
"""EndFontSize(self) -> bool"""
return _richtext.RichTextBuffer_EndFontSize(*args, **kwargs)
def BeginFont(*args, **kwargs):
"""BeginFont(self, Font font) -> bool"""
return _richtext.RichTextBuffer_BeginFont(*args, **kwargs)
def EndFont(*args, **kwargs):
"""EndFont(self) -> bool"""
return _richtext.RichTextBuffer_EndFont(*args, **kwargs)
def BeginTextColour(*args, **kwargs):
"""BeginTextColour(self, Colour colour) -> bool"""
return _richtext.RichTextBuffer_BeginTextColour(*args, **kwargs)
def EndTextColour(*args, **kwargs):
"""EndTextColour(self) -> bool"""
return _richtext.RichTextBuffer_EndTextColour(*args, **kwargs)
def BeginAlignment(*args, **kwargs):
"""BeginAlignment(self, int alignment) -> bool"""
return _richtext.RichTextBuffer_BeginAlignment(*args, **kwargs)
def EndAlignment(*args, **kwargs):
"""EndAlignment(self) -> bool"""
return _richtext.RichTextBuffer_EndAlignment(*args, **kwargs)
def BeginLeftIndent(*args, **kwargs):
"""BeginLeftIndent(self, int leftIndent, int leftSubIndent=0) -> bool"""
return _richtext.RichTextBuffer_BeginLeftIndent(*args, **kwargs)
def EndLeftIndent(*args, **kwargs):
"""EndLeftIndent(self) -> bool"""
return _richtext.RichTextBuffer_EndLeftIndent(*args, **kwargs)
def BeginRightIndent(*args, **kwargs):
"""BeginRightIndent(self, int rightIndent) -> bool"""
return _richtext.RichTextBuffer_BeginRightIndent(*args, **kwargs)
def EndRightIndent(*args, **kwargs):
"""EndRightIndent(self) -> bool"""
return _richtext.RichTextBuffer_EndRightIndent(*args, **kwargs)
def BeginParagraphSpacing(*args, **kwargs):
"""BeginParagraphSpacing(self, int before, int after) -> bool"""
return _richtext.RichTextBuffer_BeginParagraphSpacing(*args, **kwargs)
def EndParagraphSpacing(*args, **kwargs):
"""EndParagraphSpacing(self) -> bool"""
return _richtext.RichTextBuffer_EndParagraphSpacing(*args, **kwargs)
def BeginLineSpacing(*args, **kwargs):
"""BeginLineSpacing(self, int lineSpacing) -> bool"""
return _richtext.RichTextBuffer_BeginLineSpacing(*args, **kwargs)
def EndLineSpacing(*args, **kwargs):
"""EndLineSpacing(self) -> bool"""
return _richtext.RichTextBuffer_EndLineSpacing(*args, **kwargs)
def BeginNumberedBullet(*args, **kwargs):
"""
BeginNumberedBullet(self, int bulletNumber, int leftIndent, int leftSubIndent,
int bulletStyle=wxTEXT_ATTR_BULLET_STYLE_ARABIC|wxTEXT_ATTR_BULLET_STYLE_PERIOD) -> bool
"""
return _richtext.RichTextBuffer_BeginNumberedBullet(*args, **kwargs)
def EndNumberedBullet(*args, **kwargs):
"""EndNumberedBullet(self) -> bool"""
return _richtext.RichTextBuffer_EndNumberedBullet(*args, **kwargs)
def BeginSymbolBullet(*args, **kwargs):
"""BeginSymbolBullet(self, String symbol, int leftIndent, int leftSubIndent, int bulletStyle=TEXT_ATTR_BULLET_STYLE_SYMBOL) -> bool"""
return _richtext.RichTextBuffer_BeginSymbolBullet(*args, **kwargs)
def EndSymbolBullet(*args, **kwargs):
"""EndSymbolBullet(self) -> bool"""
return _richtext.RichTextBuffer_EndSymbolBullet(*args, **kwargs)
def BeginStandardBullet(*args, **kwargs):
"""
BeginStandardBullet(self, String bulletName, int leftIndent, int leftSubIndent,
int bulletStyle=TEXT_ATTR_BULLET_STYLE_STANDARD) -> bool
"""
return _richtext.RichTextBuffer_BeginStandardBullet(*args, **kwargs)
def EndStandardBullet(*args, **kwargs):
"""EndStandardBullet(self) -> bool"""
return _richtext.RichTextBuffer_EndStandardBullet(*args, **kwargs)
def BeginCharacterStyle(*args, **kwargs):
"""BeginCharacterStyle(self, String characterStyle) -> bool"""
return _richtext.RichTextBuffer_BeginCharacterStyle(*args, **kwargs)
def EndCharacterStyle(*args, **kwargs):
"""EndCharacterStyle(self) -> bool"""
return _richtext.RichTextBuffer_EndCharacterStyle(*args, **kwargs)
def BeginParagraphStyle(*args, **kwargs):
"""BeginParagraphStyle(self, String paragraphStyle) -> bool"""
return _richtext.RichTextBuffer_BeginParagraphStyle(*args, **kwargs)
def EndParagraphStyle(*args, **kwargs):
"""EndParagraphStyle(self) -> bool"""
return _richtext.RichTextBuffer_EndParagraphStyle(*args, **kwargs)
def BeginListStyle(*args, **kwargs):
"""BeginListStyle(self, String listStyle, int level=1, int number=1) -> bool"""
return _richtext.RichTextBuffer_BeginListStyle(*args, **kwargs)
def EndListStyle(*args, **kwargs):
"""EndListStyle(self) -> bool"""
return _richtext.RichTextBuffer_EndListStyle(*args, **kwargs)
def BeginURL(*args, **kwargs):
"""BeginURL(self, String url, String characterStyle=wxEmptyString) -> bool"""
return _richtext.RichTextBuffer_BeginURL(*args, **kwargs)
def EndURL(*args, **kwargs):
"""EndURL(self) -> bool"""
return _richtext.RichTextBuffer_EndURL(*args, **kwargs)
def AddEventHandler(*args, **kwargs):
"""AddEventHandler(self, EvtHandler handler) -> bool"""
return _richtext.RichTextBuffer_AddEventHandler(*args, **kwargs)
def RemoveEventHandler(*args, **kwargs):
"""RemoveEventHandler(self, EvtHandler handler, bool deleteHandler=False) -> bool"""
return _richtext.RichTextBuffer_RemoveEventHandler(*args, **kwargs)
def ClearEventHandlers(*args, **kwargs):
"""ClearEventHandlers(self)"""
return _richtext.RichTextBuffer_ClearEventHandlers(*args, **kwargs)
def SendEvent(*args, **kwargs):
"""SendEvent(self, Event event, bool sendToAll=True) -> bool"""
return _richtext.RichTextBuffer_SendEvent(*args, **kwargs)
def Copy(*args, **kwargs):
"""Copy(self, RichTextBuffer obj)"""
return _richtext.RichTextBuffer_Copy(*args, **kwargs)
def InsertParagraphsWithUndo(*args, **kwargs):
"""
InsertParagraphsWithUndo(self, long pos, RichTextParagraphLayoutBox paragraphs, RichTextCtrl ctrl,
int flags=0) -> bool
"""
return _richtext.RichTextBuffer_InsertParagraphsWithUndo(*args, **kwargs)
def InsertTextWithUndo(*args, **kwargs):
"""InsertTextWithUndo(self, long pos, String text, RichTextCtrl ctrl, int flags=0) -> bool"""
return _richtext.RichTextBuffer_InsertTextWithUndo(*args, **kwargs)
def InsertNewlineWithUndo(*args, **kwargs):
"""InsertNewlineWithUndo(self, long pos, RichTextCtrl ctrl, int flags=0) -> bool"""
return _richtext.RichTextBuffer_InsertNewlineWithUndo(*args, **kwargs)
def InsertImageWithUndo(*args, **kwargs):
"""
InsertImageWithUndo(self, long pos, wxRichTextImageBlock imageBlock, RichTextCtrl ctrl,
int flags=0) -> bool
"""
return _richtext.RichTextBuffer_InsertImageWithUndo(*args, **kwargs)
def DeleteRangeWithUndo(*args, **kwargs):
"""DeleteRangeWithUndo(self, RichTextRange range, RichTextCtrl ctrl) -> bool"""
return _richtext.RichTextBuffer_DeleteRangeWithUndo(*args, **kwargs)
def Modify(*args, **kwargs):
"""Modify(self, bool modify=True)"""
return _richtext.RichTextBuffer_Modify(*args, **kwargs)
def IsModified(*args, **kwargs):
"""IsModified(self) -> bool"""
return _richtext.RichTextBuffer_IsModified(*args, **kwargs)
def GetStyleForNewParagraph(*args, **kwargs):
"""GetStyleForNewParagraph(self, long pos, bool caretPosition=False, bool lookUpNewParaStyle=False) -> TextAttrEx"""
return _richtext.RichTextBuffer_GetStyleForNewParagraph(*args, **kwargs)
def GetHandlers(*args, **kwargs):
"""GetHandlers() -> wxRichTextFileHandlerList_t"""
return _richtext.RichTextBuffer_GetHandlers(*args, **kwargs)
GetHandlers = staticmethod(GetHandlers)
def AddHandler(*args, **kwargs):
"""AddHandler(RichTextFileHandler handler)"""
return _richtext.RichTextBuffer_AddHandler(*args, **kwargs)
AddHandler = staticmethod(AddHandler)
def InsertHandler(*args, **kwargs):
"""InsertHandler(RichTextFileHandler handler)"""
return _richtext.RichTextBuffer_InsertHandler(*args, **kwargs)
InsertHandler = staticmethod(InsertHandler)
def RemoveHandler(*args, **kwargs):
"""RemoveHandler(String name) -> bool"""
return _richtext.RichTextBuffer_RemoveHandler(*args, **kwargs)
RemoveHandler = staticmethod(RemoveHandler)
def FindHandlerByName(*args, **kwargs):
"""FindHandlerByName(String name) -> RichTextFileHandler"""
return _richtext.RichTextBuffer_FindHandlerByName(*args, **kwargs)
FindHandlerByName = staticmethod(FindHandlerByName)
def FindHandlerByExtension(*args, **kwargs):
"""FindHandlerByExtension(String extension, int imageType) -> RichTextFileHandler"""
return _richtext.RichTextBuffer_FindHandlerByExtension(*args, **kwargs)
FindHandlerByExtension = staticmethod(FindHandlerByExtension)
def FindHandlerByFilename(*args, **kwargs):
"""FindHandlerByFilename(String filename, int imageType) -> RichTextFileHandler"""
return _richtext.RichTextBuffer_FindHandlerByFilename(*args, **kwargs)
FindHandlerByFilename = staticmethod(FindHandlerByFilename)
def FindHandlerByType(*args, **kwargs):
"""FindHandlerByType(int imageType) -> RichTextFileHandler"""
return _richtext.RichTextBuffer_FindHandlerByType(*args, **kwargs)
FindHandlerByType = staticmethod(FindHandlerByType)
def GetExtWildcard(*args, **kwargs):
"""
GetExtWildcard(self, bool combine=False, bool save=False) --> (wildcards, types)
Gets a wildcard string for the file dialog based on all the currently
loaded richtext file handlers, and a list that can be used to map
those filter types to the file handler type.
"""
return _richtext.RichTextBuffer_GetExtWildcard(*args, **kwargs)
GetExtWildcard = staticmethod(GetExtWildcard)
def CleanUpHandlers(*args, **kwargs):
"""CleanUpHandlers()"""
return _richtext.RichTextBuffer_CleanUpHandlers(*args, **kwargs)
CleanUpHandlers = staticmethod(CleanUpHandlers)
def InitStandardHandlers(*args, **kwargs):
"""InitStandardHandlers()"""
return _richtext.RichTextBuffer_InitStandardHandlers(*args, **kwargs)
InitStandardHandlers = staticmethod(InitStandardHandlers)
def GetRenderer(*args, **kwargs):
"""GetRenderer() -> RichTextRenderer"""
return _richtext.RichTextBuffer_GetRenderer(*args, **kwargs)
GetRenderer = staticmethod(GetRenderer)
def SetRenderer(*args, **kwargs):
"""SetRenderer(RichTextRenderer renderer)"""
return _richtext.RichTextBuffer_SetRenderer(*args, **kwargs)
SetRenderer = staticmethod(SetRenderer)
def GetBulletRightMargin(*args, **kwargs):
"""GetBulletRightMargin() -> int"""
return _richtext.RichTextBuffer_GetBulletRightMargin(*args, **kwargs)
GetBulletRightMargin = staticmethod(GetBulletRightMargin)
def SetBulletRightMargin(*args, **kwargs):
"""SetBulletRightMargin(int margin)"""
return _richtext.RichTextBuffer_SetBulletRightMargin(*args, **kwargs)
SetBulletRightMargin = staticmethod(SetBulletRightMargin)
def GetBulletProportion(*args, **kwargs):
"""GetBulletProportion() -> float"""
return _richtext.RichTextBuffer_GetBulletProportion(*args, **kwargs)
GetBulletProportion = staticmethod(GetBulletProportion)
def SetBulletProportion(*args, **kwargs):
"""SetBulletProportion(float prop)"""
return _richtext.RichTextBuffer_SetBulletProportion(*args, **kwargs)
SetBulletProportion = staticmethod(SetBulletProportion)
def GetScale(*args, **kwargs):
"""GetScale(self) -> double"""
return _richtext.RichTextBuffer_GetScale(*args, **kwargs)
def SetScale(*args, **kwargs):
"""SetScale(self, double scale)"""
return _richtext.RichTextBuffer_SetScale(*args, **kwargs)
_richtext.RichTextBuffer_swigregister(RichTextBuffer)
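#---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated bindings): the Begin*/End*
# calls above push and pop entries on the buffer's style stack, and the
# suppress-undo calls bracket work that should not create undo records.
# Whether AddParagraph() picks up the pushed style is an assumption here; the
# sketch only demonstrates the pairing of the calls.
def _example_fill_buffer():
    """Build a small RichTextBuffer with a bold paragraph (sketch)."""
    buf = RichTextBuffer()
    buf.BeginSuppressUndo()        # no undo records while building content
    buf.BeginBold()
    buf.AddParagraph("A bold heading")
    buf.EndBold()
    buf.AddParagraph("Plain body text")
    buf.EndSuppressUndo()
    return buf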
def RichTextBuffer_GetHandlers(*args):
"""RichTextBuffer_GetHandlers() -> wxRichTextFileHandlerList_t"""
return _richtext.RichTextBuffer_GetHandlers(*args)
def RichTextBuffer_AddHandler(*args, **kwargs):
"""RichTextBuffer_AddHandler(RichTextFileHandler handler)"""
return _richtext.RichTextBuffer_AddHandler(*args, **kwargs)
def RichTextBuffer_InsertHandler(*args, **kwargs):
"""RichTextBuffer_InsertHandler(RichTextFileHandler handler)"""
return _richtext.RichTextBuffer_InsertHandler(*args, **kwargs)
def RichTextBuffer_RemoveHandler(*args, **kwargs):
"""RichTextBuffer_RemoveHandler(String name) -> bool"""
return _richtext.RichTextBuffer_RemoveHandler(*args, **kwargs)
def RichTextBuffer_FindHandlerByName(*args, **kwargs):
"""RichTextBuffer_FindHandlerByName(String name) -> RichTextFileHandler"""
return _richtext.RichTextBuffer_FindHandlerByName(*args, **kwargs)
def RichTextBuffer_FindHandlerByExtension(*args, **kwargs):
"""RichTextBuffer_FindHandlerByExtension(String extension, int imageType) -> RichTextFileHandler"""
return _richtext.RichTextBuffer_FindHandlerByExtension(*args, **kwargs)
def RichTextBuffer_FindHandlerByFilename(*args, **kwargs):
"""RichTextBuffer_FindHandlerByFilename(String filename, int imageType) -> RichTextFileHandler"""
return _richtext.RichTextBuffer_FindHandlerByFilename(*args, **kwargs)
def RichTextBuffer_FindHandlerByType(*args, **kwargs):
"""RichTextBuffer_FindHandlerByType(int imageType) -> RichTextFileHandler"""
return _richtext.RichTextBuffer_FindHandlerByType(*args, **kwargs)
def RichTextBuffer_GetExtWildcard(*args, **kwargs):
"""
    RichTextBuffer_GetExtWildcard(bool combine=False, bool save=False) --> (wildcards, types)
Gets a wildcard string for the file dialog based on all the currently
loaded richtext file handlers, and a list that can be used to map
those filter types to the file handler type.
"""
return _richtext.RichTextBuffer_GetExtWildcard(*args, **kwargs)
def RichTextBuffer_CleanUpHandlers(*args):
"""RichTextBuffer_CleanUpHandlers()"""
return _richtext.RichTextBuffer_CleanUpHandlers(*args)
def RichTextBuffer_InitStandardHandlers(*args):
"""RichTextBuffer_InitStandardHandlers()"""
return _richtext.RichTextBuffer_InitStandardHandlers(*args)
def RichTextBuffer_GetRenderer(*args):
"""RichTextBuffer_GetRenderer() -> RichTextRenderer"""
return _richtext.RichTextBuffer_GetRenderer(*args)
def RichTextBuffer_SetRenderer(*args, **kwargs):
"""RichTextBuffer_SetRenderer(RichTextRenderer renderer)"""
return _richtext.RichTextBuffer_SetRenderer(*args, **kwargs)
def RichTextBuffer_GetBulletRightMargin(*args):
"""RichTextBuffer_GetBulletRightMargin() -> int"""
return _richtext.RichTextBuffer_GetBulletRightMargin(*args)
def RichTextBuffer_SetBulletRightMargin(*args, **kwargs):
"""RichTextBuffer_SetBulletRightMargin(int margin)"""
return _richtext.RichTextBuffer_SetBulletRightMargin(*args, **kwargs)
def RichTextBuffer_GetBulletProportion(*args):
"""RichTextBuffer_GetBulletProportion() -> float"""
return _richtext.RichTextBuffer_GetBulletProportion(*args)
def RichTextBuffer_SetBulletProportion(*args, **kwargs):
"""RichTextBuffer_SetBulletProportion(float prop)"""
return _richtext.RichTextBuffer_SetBulletProportion(*args, **kwargs)
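#---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated bindings): using
# RichTextBuffer.GetExtWildcard() to drive a save dialog.  GetExtWildcard()
# returns a (wildcards, types) pair; the selected filter index maps back into
# the types list, which is then passed to SaveFile().  The wx.FileDialog usage
# is an assumption about the surrounding application code.
def _example_save_with_dialog(buf, parent_window):
    """Ask for a file name and save buf with the handler the user picked (sketch)."""
    import wx                      # local import: only needed for the dialog
    wildcards, types = RichTextBuffer.GetExtWildcard(save=True)
    dlg = wx.FileDialog(parent_window, "Save as", wildcard=wildcards,
                        style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
    try:
        if dlg.ShowModal() == wx.ID_OK:
            return buf.SaveFile(dlg.GetPath(), types[dlg.GetFilterIndex()])
        return False
    finally:
        dlg.Destroy()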
#---------------------------------------------------------------------------
RICHTEXT_HANDLER_INCLUDE_STYLESHEET = _richtext.RICHTEXT_HANDLER_INCLUDE_STYLESHEET
RICHTEXT_HANDLER_SAVE_IMAGES_TO_MEMORY = _richtext.RICHTEXT_HANDLER_SAVE_IMAGES_TO_MEMORY
RICHTEXT_HANDLER_SAVE_IMAGES_TO_FILES = _richtext.RICHTEXT_HANDLER_SAVE_IMAGES_TO_FILES
RICHTEXT_HANDLER_SAVE_IMAGES_TO_BASE64 = _richtext.RICHTEXT_HANDLER_SAVE_IMAGES_TO_BASE64
RICHTEXT_HANDLER_NO_HEADER_FOOTER = _richtext.RICHTEXT_HANDLER_NO_HEADER_FOOTER
RICHTEXT_HANDLER_CONVERT_FACENAMES = _richtext.RICHTEXT_HANDLER_CONVERT_FACENAMES
class RichTextFileHandler(_core.Object):
"""Base class for file handlers"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self): raise AttributeError, "No constructor defined"
__repr__ = _swig_repr
__swig_destroy__ = _richtext.delete_RichTextFileHandler
__del__ = lambda self : None;
def LoadStream(*args, **kwargs):
"""LoadStream(self, RichTextBuffer buffer, InputStream stream) -> bool"""
return _richtext.RichTextFileHandler_LoadStream(*args, **kwargs)
def SaveStream(*args, **kwargs):
"""SaveStream(self, RichTextBuffer buffer, wxOutputStream stream) -> bool"""
return _richtext.RichTextFileHandler_SaveStream(*args, **kwargs)
def LoadFile(*args, **kwargs):
"""LoadFile(self, RichTextBuffer buffer, String filename) -> bool"""
return _richtext.RichTextFileHandler_LoadFile(*args, **kwargs)
def SaveFile(*args, **kwargs):
"""SaveFile(self, RichTextBuffer buffer, String filename) -> bool"""
return _richtext.RichTextFileHandler_SaveFile(*args, **kwargs)
def CanHandle(*args, **kwargs):
"""CanHandle(self, String filename) -> bool"""
return _richtext.RichTextFileHandler_CanHandle(*args, **kwargs)
def CanSave(*args, **kwargs):
"""CanSave(self) -> bool"""
return _richtext.RichTextFileHandler_CanSave(*args, **kwargs)
def CanLoad(*args, **kwargs):
"""CanLoad(self) -> bool"""
return _richtext.RichTextFileHandler_CanLoad(*args, **kwargs)
def IsVisible(*args, **kwargs):
"""IsVisible(self) -> bool"""
return _richtext.RichTextFileHandler_IsVisible(*args, **kwargs)
def SetVisible(*args, **kwargs):
"""SetVisible(self, bool visible)"""
return _richtext.RichTextFileHandler_SetVisible(*args, **kwargs)
def SetName(*args, **kwargs):
"""SetName(self, String name)"""
return _richtext.RichTextFileHandler_SetName(*args, **kwargs)
def GetName(*args, **kwargs):
"""GetName(self) -> String"""
return _richtext.RichTextFileHandler_GetName(*args, **kwargs)
Name = property(GetName,SetName)
def SetExtension(*args, **kwargs):
"""SetExtension(self, String ext)"""
return _richtext.RichTextFileHandler_SetExtension(*args, **kwargs)
def GetExtension(*args, **kwargs):
"""GetExtension(self) -> String"""
return _richtext.RichTextFileHandler_GetExtension(*args, **kwargs)
Extension = property(GetExtension,SetExtension)
def SetType(*args, **kwargs):
"""SetType(self, int type)"""
return _richtext.RichTextFileHandler_SetType(*args, **kwargs)
def GetType(*args, **kwargs):
"""GetType(self) -> int"""
return _richtext.RichTextFileHandler_GetType(*args, **kwargs)
Type = property(GetType,SetType)
def SetFlags(*args, **kwargs):
"""SetFlags(self, int flags)"""
return _richtext.RichTextFileHandler_SetFlags(*args, **kwargs)
def GetFlags(*args, **kwargs):
"""GetFlags(self) -> int"""
return _richtext.RichTextFileHandler_GetFlags(*args, **kwargs)
Flags = property(GetFlags,SetFlags)
def SetEncoding(*args, **kwargs):
"""SetEncoding(self, String encoding)"""
return _richtext.RichTextFileHandler_SetEncoding(*args, **kwargs)
def GetEncoding(*args, **kwargs):
"""GetEncoding(self) -> String"""
return _richtext.RichTextFileHandler_GetEncoding(*args, **kwargs)
Encoding = property(GetEncoding,SetEncoding)
_richtext.RichTextFileHandler_swigregister(RichTextFileHandler)
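# --- Added usage sketch (not part of the generated bindings) --------------
# Hedged example of driving a RichTextFileHandler directly: look a handler up
# by file name and, if it reports it can load, pull the file into an existing
# buffer.  RICHTEXT_TYPE_ANY is assumed to be defined earlier in this module,
# as the docstrings above suggest.
def _exampleLoadIntoBuffer(buf, filename):
    handler = RichTextBuffer_FindHandlerByFilename(filename, RICHTEXT_TYPE_ANY)
    if handler and handler.CanLoad():
        return handler.LoadFile(buf, filename)
    return False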
class RichTextPlainTextHandler(RichTextFileHandler):
"""Proxy of C++ RichTextPlainTextHandler class"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
"""__init__(self, String name=TextName, String ext=TextExt, int type=RICHTEXT_TYPE_TEXT) -> RichTextPlainTextHandler"""
_richtext.RichTextPlainTextHandler_swiginit(self,_richtext.new_RichTextPlainTextHandler(*args, **kwargs))
_richtext.RichTextPlainTextHandler_swigregister(RichTextPlainTextHandler)
TextName = cvar.TextName
TextExt = cvar.TextExt
#---------------------------------------------------------------------------
class RichTextRenderer(_core.Object):
"""Proxy of C++ RichTextRenderer class"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self): raise AttributeError, "No constructor defined"
__repr__ = _swig_repr
__swig_destroy__ = _richtext.delete_RichTextRenderer
__del__ = lambda self : None;
def DrawStandardBullet(*args, **kwargs):
"""
DrawStandardBullet(self, RichTextParagraph paragraph, DC dc, TextAttrEx attr,
Rect rect) -> bool
"""
return _richtext.RichTextRenderer_DrawStandardBullet(*args, **kwargs)
def DrawTextBullet(*args, **kwargs):
"""
DrawTextBullet(self, RichTextParagraph paragraph, DC dc, TextAttrEx attr,
Rect rect, String text) -> bool
"""
return _richtext.RichTextRenderer_DrawTextBullet(*args, **kwargs)
def DrawBitmapBullet(*args, **kwargs):
"""
DrawBitmapBullet(self, RichTextParagraph paragraph, DC dc, TextAttrEx attr,
Rect rect) -> bool
"""
return _richtext.RichTextRenderer_DrawBitmapBullet(*args, **kwargs)
def EnumerateStandardBulletNames(*args, **kwargs):
"""EnumerateStandardBulletNames(self, wxArrayString bulletNames) -> bool"""
return _richtext.RichTextRenderer_EnumerateStandardBulletNames(*args, **kwargs)
_richtext.RichTextRenderer_swigregister(RichTextRenderer)
class RichTextStdRenderer(RichTextRenderer):
"""Proxy of C++ RichTextStdRenderer class"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
"""__init__(self) -> RichTextStdRenderer"""
_richtext.RichTextStdRenderer_swiginit(self,_richtext.new_RichTextStdRenderer(*args, **kwargs))
_richtext.RichTextStdRenderer_swigregister(RichTextStdRenderer)
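# --- Added usage sketch (not part of the generated bindings) --------------
# Illustrative only: install the stock renderer explicitly and tweak the two
# global bullet-geometry knobs exposed above.  The numeric values are arbitrary
# sample figures, and the ownership hand-off is an assumption about how the
# buffer manages the renderer it is given.
def _exampleTuneBulletRendering():
    renderer = RichTextStdRenderer()
    renderer.thisown = False  # assumed: the buffer takes ownership
    RichTextBuffer_SetRenderer(renderer)
    RichTextBuffer_SetBulletRightMargin(20)
    RichTextBuffer_SetBulletProportion(0.3)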
#---------------------------------------------------------------------------
RE_READONLY = _richtext.RE_READONLY
RE_MULTILINE = _richtext.RE_MULTILINE
RE_CENTER_CARET = _richtext.RE_CENTER_CARET
RE_CENTRE_CARET = _richtext.RE_CENTRE_CARET
RICHTEXT_SHIFT_DOWN = _richtext.RICHTEXT_SHIFT_DOWN
RICHTEXT_CTRL_DOWN = _richtext.RICHTEXT_CTRL_DOWN
RICHTEXT_ALT_DOWN = _richtext.RICHTEXT_ALT_DOWN
RICHTEXT_SELECTED = _richtext.RICHTEXT_SELECTED
RICHTEXT_TAGGED = _richtext.RICHTEXT_TAGGED
RICHTEXT_FOCUSSED = _richtext.RICHTEXT_FOCUSSED
RICHTEXT_IS_FOCUS = _richtext.RICHTEXT_IS_FOCUS
class RichTextCtrl(_core.Control):
"""Proxy of C++ RichTextCtrl class"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
"""
__init__(self, Window parent, int id=-1, String value=EmptyString,
Point pos=DefaultPosition, Size size=DefaultSize,
long style=RE_MULTILINE, Validator validator=DefaultValidator,
String name=RichTextCtrlNameStr) -> RichTextCtrl
"""
_richtext.RichTextCtrl_swiginit(self,_richtext.new_RichTextCtrl(*args, **kwargs))
self._setOORInfo(self)
def Create(*args, **kwargs):
"""
Create(self, Window parent, int id=-1, String value=EmptyString,
Point pos=DefaultPosition, Size size=DefaultSize,
long style=RE_MULTILINE, Validator validator=DefaultValidator,
String name=RichTextCtrlNameStr) -> bool
"""
return _richtext.RichTextCtrl_Create(*args, **kwargs)
def GetValue(*args, **kwargs):
"""GetValue(self) -> String"""
return _richtext.RichTextCtrl_GetValue(*args, **kwargs)
def SetValue(*args, **kwargs):
"""SetValue(self, String value)"""
return _richtext.RichTextCtrl_SetValue(*args, **kwargs)
def GetRange(*args, **kwargs):
"""GetRange(self, long from, long to) -> String"""
return _richtext.RichTextCtrl_GetRange(*args, **kwargs)
def GetLineLength(*args, **kwargs):
"""GetLineLength(self, long lineNo) -> int"""
return _richtext.RichTextCtrl_GetLineLength(*args, **kwargs)
def GetLineText(*args, **kwargs):
"""GetLineText(self, long lineNo) -> String"""
return _richtext.RichTextCtrl_GetLineText(*args, **kwargs)
def GetNumberOfLines(*args, **kwargs):
"""GetNumberOfLines(self) -> int"""
return _richtext.RichTextCtrl_GetNumberOfLines(*args, **kwargs)
def IsModified(*args, **kwargs):
"""IsModified(self) -> bool"""
return _richtext.RichTextCtrl_IsModified(*args, **kwargs)
def IsEditable(*args, **kwargs):
"""IsEditable(self) -> bool"""
return _richtext.RichTextCtrl_IsEditable(*args, **kwargs)
def IsSingleLine(*args, **kwargs):
"""IsSingleLine(self) -> bool"""
return _richtext.RichTextCtrl_IsSingleLine(*args, **kwargs)
def IsMultiLine(*args, **kwargs):
"""IsMultiLine(self) -> bool"""
return _richtext.RichTextCtrl_IsMultiLine(*args, **kwargs)
def GetSelection(*args, **kwargs):
"""
GetSelection() --> (start, end)
Returns the start and end positions of the current selection. If the
values are the same then there is no selection.
"""
return _richtext.RichTextCtrl_GetSelection(*args, **kwargs)
def GetStringSelection(*args, **kwargs):
"""GetStringSelection(self) -> String"""
return _richtext.RichTextCtrl_GetStringSelection(*args, **kwargs)
def GetFilename(*args, **kwargs):
"""GetFilename(self) -> String"""
return _richtext.RichTextCtrl_GetFilename(*args, **kwargs)
def SetFilename(*args, **kwargs):
"""SetFilename(self, String filename)"""
return _richtext.RichTextCtrl_SetFilename(*args, **kwargs)
def SetDelayedLayoutThreshold(*args, **kwargs):
"""
SetDelayedLayoutThreshold(self, long threshold)
Set the threshold in character positions for doing layout optimization
during sizing.
"""
return _richtext.RichTextCtrl_SetDelayedLayoutThreshold(*args, **kwargs)
def GetDelayedLayoutThreshold(*args, **kwargs):
"""
GetDelayedLayoutThreshold(self) -> long
Get the threshold in character positions for doing layout optimization
during sizing.
"""
return _richtext.RichTextCtrl_GetDelayedLayoutThreshold(*args, **kwargs)
def SetTextCursor(*args, **kwargs):
"""
SetTextCursor(self, Cursor cursor)
Set text cursor
"""
return _richtext.RichTextCtrl_SetTextCursor(*args, **kwargs)
def GetTextCursor(*args, **kwargs):
"""
GetTextCursor(self) -> Cursor
Get text cursor
"""
return _richtext.RichTextCtrl_GetTextCursor(*args, **kwargs)
def SetURLCursor(*args, **kwargs):
"""
SetURLCursor(self, Cursor cursor)
Set URL cursor
"""
return _richtext.RichTextCtrl_SetURLCursor(*args, **kwargs)
def GetURLCursor(*args, **kwargs):
"""
GetURLCursor(self) -> Cursor
Get URL cursor
"""
return _richtext.RichTextCtrl_GetURLCursor(*args, **kwargs)
def GetContextMenu(*args, **kwargs):
"""GetContextMenu(self) -> Menu"""
return _richtext.RichTextCtrl_GetContextMenu(*args, **kwargs)
def SetContextMenu(*args, **kwargs):
"""SetContextMenu(self, Menu menu)"""
return _richtext.RichTextCtrl_SetContextMenu(*args, **kwargs)
def Clear(*args, **kwargs):
"""Clear(self)"""
return _richtext.RichTextCtrl_Clear(*args, **kwargs)
def Replace(*args, **kwargs):
"""Replace(self, long from, long to, String value)"""
return _richtext.RichTextCtrl_Replace(*args, **kwargs)
def Remove(*args, **kwargs):
"""Remove(self, long from, long to)"""
return _richtext.RichTextCtrl_Remove(*args, **kwargs)
def LoadFile(*args, **kwargs):
"""
LoadFile(self, String file, int type=RICHTEXT_TYPE_ANY) -> bool
Load the contents of the document from the given filename.
"""
return _richtext.RichTextCtrl_LoadFile(*args, **kwargs)
def SaveFile(*args, **kwargs):
"""
SaveFile(self, String file=EmptyString, int type=RICHTEXT_TYPE_ANY) -> bool
Save the contents of the document to the given filename, or if the
empty string is passed then to the filename set with `SetFilename`.
"""
return _richtext.RichTextCtrl_SaveFile(*args, **kwargs)
def SetHandlerFlags(*args, **kwargs):
"""
SetHandlerFlags(self, int flags)
Set the handler flags, controlling loading and saving.
"""
return _richtext.RichTextCtrl_SetHandlerFlags(*args, **kwargs)
def GetHandlerFlags(*args, **kwargs):
"""
GetHandlerFlags(self) -> int
Get the handler flags, controlling loading and saving.
"""
return _richtext.RichTextCtrl_GetHandlerFlags(*args, **kwargs)
def MarkDirty(*args, **kwargs):
"""
MarkDirty(self)
Sets the dirty flag, meaning that the contents of the control have
changed and need to be saved.
"""
return _richtext.RichTextCtrl_MarkDirty(*args, **kwargs)
def DiscardEdits(*args, **kwargs):
"""
DiscardEdits(self)
Clears the dirty flag.
:see: `MarkDirty`
"""
return _richtext.RichTextCtrl_DiscardEdits(*args, **kwargs)
def SetMaxLength(*args, **kwargs):
"""
SetMaxLength(self, unsigned long len)
Set the max number of characters which may be entered in a single line
text control.
"""
return _richtext.RichTextCtrl_SetMaxLength(*args, **kwargs)
def WriteText(*args, **kwargs):
"""
WriteText(self, String text)
Insert text at the current position.
"""
return _richtext.RichTextCtrl_WriteText(*args, **kwargs)
def AppendText(*args, **kwargs):
"""
AppendText(self, String text)
Append text to the end of the document.
"""
return _richtext.RichTextCtrl_AppendText(*args, **kwargs)
def SetStyle(*args, **kwargs):
"""
SetStyle(self, RichTextRange range, TextAttrEx style) -> bool
Set the style for the text in ``range`` to ``style``
"""
return _richtext.RichTextCtrl_SetStyle(*args, **kwargs)
def GetStyle(*args, **kwargs):
"""
GetStyle(self, long position, TextAttrEx style) -> bool
Retrieve the style used at the given position. Copies the style
values at ``position`` into the ``style`` parameter and returns ``True``
if successful. Returns ``False`` otherwise.
"""
return _richtext.RichTextCtrl_GetStyle(*args, **kwargs)
def GetStyleForRange(*args, **kwargs):
"""
GetStyleForRange(self, RichTextRange range, TextAttrEx style) -> bool
Get the common set of styles for the range
"""
return _richtext.RichTextCtrl_GetStyleForRange(*args, **kwargs)
def SetStyleEx(*args, **kwargs):
"""
SetStyleEx(self, RichTextRange range, TextAttrEx style, int flags=RICHTEXT_SETSTYLE_WITH_UNDO) -> bool
Extended style setting operation with flags including:
RICHTEXT_SETSTYLE_WITH_UNDO, RICHTEXT_SETSTYLE_OPTIMIZE,
RICHTEXT_SETSTYLE_PARAGRAPHS_ONLY, RICHTEXT_SETSTYLE_CHARACTERS_ONLY
"""
return _richtext.RichTextCtrl_SetStyleEx(*args, **kwargs)
def GetUncombinedStyle(*args, **kwargs):
"""
GetUncombinedStyle(self, long position, TextAttrEx style) -> bool
Get the content (uncombined) attributes for this position. Copies the
style values at ``position`` into the ``style`` parameter and returns
``True`` if successful. Returns ``False`` otherwise.
"""
return _richtext.RichTextCtrl_GetUncombinedStyle(*args, **kwargs)
def SetDefaultStyle(*args, **kwargs):
"""
SetDefaultStyle(self, TextAttrEx style) -> bool
Set the style used by default for the rich text document.
"""
return _richtext.RichTextCtrl_SetDefaultStyle(*args, **kwargs)
def GetDefaultStyle(*args, **kwargs):
"""
GetDefaultStyle(self) -> TextAttrEx
Retrieves a copy of the default style object.
"""
return _richtext.RichTextCtrl_GetDefaultStyle(*args, **kwargs)
def SetListStyle(*args, **kwargs):
"""
SetListStyle(self, RichTextRange range, String defName, int flags=RICHTEXT_SETSTYLE_WITH_UNDO,
int startFrom=1, int specifiedLevel=-1) -> bool
"""
return _richtext.RichTextCtrl_SetListStyle(*args, **kwargs)
def ClearListStyle(*args, **kwargs):
"""ClearListStyle(self, RichTextRange range, int flags=RICHTEXT_SETSTYLE_WITH_UNDO) -> bool"""
return _richtext.RichTextCtrl_ClearListStyle(*args, **kwargs)
def NumberList(*args, **kwargs):
"""
NumberList(self, RichTextRange range, String defName, int flags=RICHTEXT_SETSTYLE_WITH_UNDO,
int startFrom=1, int specifiedLevel=-1) -> bool
"""
return _richtext.RichTextCtrl_NumberList(*args, **kwargs)
def PromoteList(*args, **kwargs):
"""
PromoteList(self, int promoteBy, RichTextRange range, String defName,
int flags=RICHTEXT_SETSTYLE_WITH_UNDO, int specifiedLevel=-1) -> bool
"""
return _richtext.RichTextCtrl_PromoteList(*args, **kwargs)
def Delete(*args, **kwargs):
"""Delete(self, RichTextRange range) -> bool"""
return _richtext.RichTextCtrl_Delete(*args, **kwargs)
def XYToPosition(*args, **kwargs):
"""
XYToPosition(self, long x, long y) -> long
        Translate col,row coordinates into a document position.
"""
return _richtext.RichTextCtrl_XYToPosition(*args, **kwargs)
def PositionToXY(*args, **kwargs):
"""
PositionToXY(self, long pos) --> (x, y)
Retrieves the col,row for the given position within the document
"""
return _richtext.RichTextCtrl_PositionToXY(*args, **kwargs)
def ShowPosition(*args, **kwargs):
"""
ShowPosition(self, long position)
Ensure that the given position in the document is visible.
"""
return _richtext.RichTextCtrl_ShowPosition(*args, **kwargs)
def HitTest(*args, **kwargs):
"""
HitTest(self, Point pt) --> (result, pos)
Returns the character position at the given point in pixels. Note
that ``pt`` should be given in device coordinates, and not be adjusted
for the client area origin nor for scrolling. The return value is a
tuple of the hit test result and the position.
"""
return _richtext.RichTextCtrl_HitTest(*args, **kwargs)
def HitTestXY(*args, **kwargs):
"""
        HitTestXY(self, Point pt) --> (result, col, row)
Returns the column and row of the given point in pixels. Note that
``pt`` should be given in device coordinates, and not be adjusted for
the client area origin nor for scrolling. The return value is a tuple
of the hit test result and the column and row values.
"""
return _richtext.RichTextCtrl_HitTestXY(*args, **kwargs)
def Copy(*args, **kwargs):
"""
Copy(self)
Copies the selected text to the clipboard.
"""
return _richtext.RichTextCtrl_Copy(*args, **kwargs)
def Cut(*args, **kwargs):
"""
Cut(self)
Copies the selected text to the clipboard and removes the selection.
"""
return _richtext.RichTextCtrl_Cut(*args, **kwargs)
def Paste(*args, **kwargs):
"""
Paste(self)
Pastes text from the clipboard into the document at the current
insertion point.
"""
return _richtext.RichTextCtrl_Paste(*args, **kwargs)
def DeleteSelection(*args, **kwargs):
"""
DeleteSelection(self)
Remove the current selection.
"""
return _richtext.RichTextCtrl_DeleteSelection(*args, **kwargs)
def CanCopy(*args, **kwargs):
"""
CanCopy(self) -> bool
Returns ``True`` if the selection can be copied to the clipboard.
"""
return _richtext.RichTextCtrl_CanCopy(*args, **kwargs)
def CanCut(*args, **kwargs):
"""
CanCut(self) -> bool
Returns ``True`` if the selection can be cut to the clipboard.
"""
return _richtext.RichTextCtrl_CanCut(*args, **kwargs)
def CanPaste(*args, **kwargs):
"""
CanPaste(self) -> bool
Returns ``True`` if the current contents of the clipboard can be
pasted into the document.
"""
return _richtext.RichTextCtrl_CanPaste(*args, **kwargs)
def CanDeleteSelection(*args, **kwargs):
"""
CanDeleteSelection(self) -> bool
Returns ``True`` if the selection can be removed from the document.
"""
return _richtext.RichTextCtrl_CanDeleteSelection(*args, **kwargs)
def Undo(*args, **kwargs):
"""
Undo(self)
If the last operation can be undone, undoes the last operation.
"""
return _richtext.RichTextCtrl_Undo(*args, **kwargs)
def Redo(*args, **kwargs):
"""
Redo(self)
If the last operation can be redone, redoes the last operation.
"""
return _richtext.RichTextCtrl_Redo(*args, **kwargs)
def CanUndo(*args, **kwargs):
"""
CanUndo(self) -> bool
Returns ``True`` if the last operation can be undone.
"""
return _richtext.RichTextCtrl_CanUndo(*args, **kwargs)
def CanRedo(*args, **kwargs):
"""
CanRedo(self) -> bool
Returns ``True`` if the last operation can be redone.
"""
return _richtext.RichTextCtrl_CanRedo(*args, **kwargs)
def SetInsertionPoint(*args, **kwargs):
"""
SetInsertionPoint(self, long pos)
Sets the insertion point at the given position.
"""
return _richtext.RichTextCtrl_SetInsertionPoint(*args, **kwargs)
def SetInsertionPointEnd(*args, **kwargs):
"""
SetInsertionPointEnd(self)
Moves the insertion point to the end of the document.
"""
return _richtext.RichTextCtrl_SetInsertionPointEnd(*args, **kwargs)
def GetInsertionPoint(*args, **kwargs):
"""
GetInsertionPoint(self) -> long
Returns the insertion point. This is defined as the zero based index
of the character position to the right of the insertion point.
"""
return _richtext.RichTextCtrl_GetInsertionPoint(*args, **kwargs)
def GetLastPosition(*args, **kwargs):
"""
GetLastPosition(self) -> long
Returns the zero based index of the last position in the document.
"""
return _richtext.RichTextCtrl_GetLastPosition(*args, **kwargs)
def SetSelection(*args, **kwargs):
"""
SetSelection(self, long from, long to)
Selects the text starting at the first position up to (but not
including) the character at the last position. If both parameters are
equal to -1 then all text in the control is selected.
"""
return _richtext.RichTextCtrl_SetSelection(*args, **kwargs)
def SelectAll(*args, **kwargs):
"""
SelectAll(self)
Select all text in the document.
"""
return _richtext.RichTextCtrl_SelectAll(*args, **kwargs)
def SetEditable(*args, **kwargs):
"""
SetEditable(self, bool editable)
Makes the document editable or read-only, overriding the RE_READONLY
flag.
"""
return _richtext.RichTextCtrl_SetEditable(*args, **kwargs)
def HasSelection(*args, **kwargs):
"""HasSelection(self) -> bool"""
return _richtext.RichTextCtrl_HasSelection(*args, **kwargs)
def WriteImage(*args, **kwargs):
"""
WriteImage(self, Image image, int bitmapType=BITMAP_TYPE_PNG) -> bool
Write an image at the current insertion point. Supply optional type to
use for internal and file storage of the raw data.
"""
return _richtext.RichTextCtrl_WriteImage(*args, **kwargs)
def WriteBitmap(*args, **kwargs):
"""
WriteBitmap(self, Bitmap bitmap, int bitmapType=BITMAP_TYPE_PNG) -> bool
Write a bitmap at the current insertion point. Supply optional type to
use for internal and file storage of the raw data.
"""
return _richtext.RichTextCtrl_WriteBitmap(*args, **kwargs)
def WriteImageFile(*args, **kwargs):
"""
WriteImageFile(self, String filename, int bitmapType) -> bool
Load an image from file and write at the current insertion point.
"""
return _richtext.RichTextCtrl_WriteImageFile(*args, **kwargs)
def WriteImageBlock(*args, **kwargs):
"""
WriteImageBlock(self, wxRichTextImageBlock imageBlock) -> bool
Write an image block at the current insertion point.
"""
return _richtext.RichTextCtrl_WriteImageBlock(*args, **kwargs)
def Newline(*args, **kwargs):
"""
Newline(self) -> bool
Insert a newline (actually paragraph) at the current insertion point.
"""
return _richtext.RichTextCtrl_Newline(*args, **kwargs)
def LineBreak(*args, **kwargs):
"""
LineBreak(self) -> bool
Insert a line break at the current insertion point.
"""
return _richtext.RichTextCtrl_LineBreak(*args, **kwargs)
def SetBasicStyle(*args, **kwargs):
"""SetBasicStyle(self, TextAttrEx style)"""
return _richtext.RichTextCtrl_SetBasicStyle(*args, **kwargs)
def GetBasicStyle(*args, **kwargs):
"""
GetBasicStyle(self) -> TextAttrEx
Get basic (overall) style
"""
return _richtext.RichTextCtrl_GetBasicStyle(*args, **kwargs)
def BeginStyle(*args, **kwargs):
"""
BeginStyle(self, TextAttrEx style) -> bool
Begin using a style
"""
return _richtext.RichTextCtrl_BeginStyle(*args, **kwargs)
def EndStyle(*args, **kwargs):
"""
EndStyle(self) -> bool
End the style
"""
return _richtext.RichTextCtrl_EndStyle(*args, **kwargs)
def EndAllStyles(*args, **kwargs):
"""
EndAllStyles(self) -> bool
End all styles
"""
return _richtext.RichTextCtrl_EndAllStyles(*args, **kwargs)
def BeginBold(*args, **kwargs):
"""
BeginBold(self) -> bool
Begin using bold
"""
return _richtext.RichTextCtrl_BeginBold(*args, **kwargs)
def EndBold(*args, **kwargs):
"""
EndBold(self) -> bool
End using bold
"""
return _richtext.RichTextCtrl_EndBold(*args, **kwargs)
def BeginItalic(*args, **kwargs):
"""
BeginItalic(self) -> bool
Begin using italic
"""
return _richtext.RichTextCtrl_BeginItalic(*args, **kwargs)
def EndItalic(*args, **kwargs):
"""
EndItalic(self) -> bool
End using italic
"""
return _richtext.RichTextCtrl_EndItalic(*args, **kwargs)
def BeginUnderline(*args, **kwargs):
"""
BeginUnderline(self) -> bool
Begin using underline
"""
return _richtext.RichTextCtrl_BeginUnderline(*args, **kwargs)
def EndUnderline(*args, **kwargs):
"""
EndUnderline(self) -> bool
End using underline
"""
return _richtext.RichTextCtrl_EndUnderline(*args, **kwargs)
def BeginFontSize(*args, **kwargs):
"""
BeginFontSize(self, int pointSize) -> bool
Begin using point size
"""
return _richtext.RichTextCtrl_BeginFontSize(*args, **kwargs)
def EndFontSize(*args, **kwargs):
"""
EndFontSize(self) -> bool
End using point size
"""
return _richtext.RichTextCtrl_EndFontSize(*args, **kwargs)
def BeginFont(*args, **kwargs):
"""
BeginFont(self, Font font) -> bool
Begin using this font
"""
return _richtext.RichTextCtrl_BeginFont(*args, **kwargs)
def EndFont(*args, **kwargs):
"""
EndFont(self) -> bool
End using a font
"""
return _richtext.RichTextCtrl_EndFont(*args, **kwargs)
def BeginTextColour(*args, **kwargs):
"""
BeginTextColour(self, Colour colour) -> bool
Begin using this colour
"""
return _richtext.RichTextCtrl_BeginTextColour(*args, **kwargs)
def EndTextColour(*args, **kwargs):
"""
EndTextColour(self) -> bool
End using a colour
"""
return _richtext.RichTextCtrl_EndTextColour(*args, **kwargs)
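    # Added illustrative helper, not part of the wx API: shows the usual
    # Begin*/End* pairing for writing styled runs at the insertion point.
    def _exampleWriteStyledText(self):
        self.BeginBold()
        self.WriteText("bold ")
        self.EndBold()
        self.BeginTextColour((255, 0, 0))
        self.WriteText("red")
        self.EndTextColour()
        self.Newline()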
def BeginAlignment(*args, **kwargs):
"""
BeginAlignment(self, int alignment) -> bool
Begin using alignment
"""
return _richtext.RichTextCtrl_BeginAlignment(*args, **kwargs)
def EndAlignment(*args, **kwargs):
"""
EndAlignment(self) -> bool
End alignment
"""
return _richtext.RichTextCtrl_EndAlignment(*args, **kwargs)
def BeginLeftIndent(*args, **kwargs):
"""
BeginLeftIndent(self, int leftIndent, int leftSubIndent=0) -> bool
Begin left indent
"""
return _richtext.RichTextCtrl_BeginLeftIndent(*args, **kwargs)
def EndLeftIndent(*args, **kwargs):
"""
EndLeftIndent(self) -> bool
End left indent
"""
return _richtext.RichTextCtrl_EndLeftIndent(*args, **kwargs)
def BeginRightIndent(*args, **kwargs):
"""
BeginRightIndent(self, int rightIndent) -> bool
Begin right indent
"""
return _richtext.RichTextCtrl_BeginRightIndent(*args, **kwargs)
def EndRightIndent(*args, **kwargs):
"""
EndRightIndent(self) -> bool
End right indent
"""
return _richtext.RichTextCtrl_EndRightIndent(*args, **kwargs)
def BeginParagraphSpacing(*args, **kwargs):
"""
BeginParagraphSpacing(self, int before, int after) -> bool
Begin paragraph spacing
"""
return _richtext.RichTextCtrl_BeginParagraphSpacing(*args, **kwargs)
def EndParagraphSpacing(*args, **kwargs):
"""
EndParagraphSpacing(self) -> bool
End paragraph spacing
"""
return _richtext.RichTextCtrl_EndParagraphSpacing(*args, **kwargs)
def BeginLineSpacing(*args, **kwargs):
"""
BeginLineSpacing(self, int lineSpacing) -> bool
Begin line spacing
"""
return _richtext.RichTextCtrl_BeginLineSpacing(*args, **kwargs)
def EndLineSpacing(*args, **kwargs):
"""
EndLineSpacing(self) -> bool
End line spacing
"""
return _richtext.RichTextCtrl_EndLineSpacing(*args, **kwargs)
def BeginNumberedBullet(*args, **kwargs):
"""
BeginNumberedBullet(self, int bulletNumber, int leftIndent, int leftSubIndent,
int bulletStyle=wxTEXT_ATTR_BULLET_STYLE_ARABIC|wxTEXT_ATTR_BULLET_STYLE_PERIOD) -> bool
Begin numbered bullet
"""
return _richtext.RichTextCtrl_BeginNumberedBullet(*args, **kwargs)
def EndNumberedBullet(*args, **kwargs):
"""
EndNumberedBullet(self) -> bool
End numbered bullet
"""
return _richtext.RichTextCtrl_EndNumberedBullet(*args, **kwargs)
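    # Added illustrative helper, not part of the wx API: writes a numbered list
    # with one paragraph per item; the 100/60 indents are arbitrary sample values
    # in the indent units used by the rich text attributes.
    def _exampleWriteNumberedList(self, items):
        for number, text in enumerate(items, 1):
            self.BeginNumberedBullet(number, 100, 60)
            self.WriteText(text)
            self.Newline()
            self.EndNumberedBullet()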
def BeginSymbolBullet(*args, **kwargs):
"""
BeginSymbolBullet(self, String symbol, int leftIndent, int leftSubIndent, int bulletStyle=TEXT_ATTR_BULLET_STYLE_SYMBOL) -> bool
Begin symbol bullet
"""
return _richtext.RichTextCtrl_BeginSymbolBullet(*args, **kwargs)
def EndSymbolBullet(*args, **kwargs):
"""
EndSymbolBullet(self) -> bool
End symbol bullet
"""
return _richtext.RichTextCtrl_EndSymbolBullet(*args, **kwargs)
def BeginStandardBullet(*args, **kwargs):
"""
BeginStandardBullet(self, String bulletName, int leftIndent, int leftSubIndent,
int bulletStyle=TEXT_ATTR_BULLET_STYLE_STANDARD) -> bool
Begin standard bullet
"""
return _richtext.RichTextCtrl_BeginStandardBullet(*args, **kwargs)
def EndStandardBullet(*args, **kwargs):
"""
EndStandardBullet(self) -> bool
End standard bullet
"""
return _richtext.RichTextCtrl_EndStandardBullet(*args, **kwargs)
def BeginCharacterStyle(*args, **kwargs):
"""
BeginCharacterStyle(self, String characterStyle) -> bool
Begin named character style
"""
return _richtext.RichTextCtrl_BeginCharacterStyle(*args, **kwargs)
def EndCharacterStyle(*args, **kwargs):
"""
EndCharacterStyle(self) -> bool
End named character style
"""
return _richtext.RichTextCtrl_EndCharacterStyle(*args, **kwargs)
def BeginParagraphStyle(*args, **kwargs):
"""
BeginParagraphStyle(self, String paragraphStyle) -> bool
Begin named paragraph style
"""
return _richtext.RichTextCtrl_BeginParagraphStyle(*args, **kwargs)
def EndParagraphStyle(*args, **kwargs):
"""
EndParagraphStyle(self) -> bool
        End named paragraph style
"""
return _richtext.RichTextCtrl_EndParagraphStyle(*args, **kwargs)
def BeginListStyle(*args, **kwargs):
"""
BeginListStyle(self, String listStyle, int level=1, int number=1) -> bool
Begin named list style.
"""
return _richtext.RichTextCtrl_BeginListStyle(*args, **kwargs)
def EndListStyle(*args, **kwargs):
"""
EndListStyle(self) -> bool
End named list style.
"""
return _richtext.RichTextCtrl_EndListStyle(*args, **kwargs)
def BeginURL(*args, **kwargs):
"""
BeginURL(self, String url, String characterStyle=wxEmptyString) -> bool
Begin URL.
"""
return _richtext.RichTextCtrl_BeginURL(*args, **kwargs)
def EndURL(*args, **kwargs):
"""
EndURL(self) -> bool
End URL.
"""
return _richtext.RichTextCtrl_EndURL(*args, **kwargs)
def SetDefaultStyleToCursorStyle(*args, **kwargs):
"""
SetDefaultStyleToCursorStyle(self) -> bool
Sets the default style to the style under the cursor
"""
return _richtext.RichTextCtrl_SetDefaultStyleToCursorStyle(*args, **kwargs)
def SelectNone(*args, **kwargs):
"""
SelectNone(self)
Clear the selection
"""
return _richtext.RichTextCtrl_SelectNone(*args, **kwargs)
def SelectWord(*args, **kwargs):
"""
SelectWord(self, long position) -> bool
Select the word at the given character position
"""
return _richtext.RichTextCtrl_SelectWord(*args, **kwargs)
def GetSelectionRange(*args, **kwargs):
"""
GetSelectionRange(self) -> RichTextRange
Get the selection range in character positions.
"""
return _richtext.RichTextCtrl_GetSelectionRange(*args, **kwargs)
def SetSelectionRange(*args, **kwargs):
"""
SetSelectionRange(self, RichTextRange range)
Set the selection range in character positions. The end point of range
is specified as the last character position of the span of text, plus
one. So, for example, to set the selection for a character at position
5, use the range (5,6).
"""
return _richtext.RichTextCtrl_SetSelectionRange(*args, **kwargs)
def GetInternalSelectionRange(*args, **kwargs):
"""
GetInternalSelectionRange(self) -> RichTextRange
Get the selection range in character positions. The range is in
internal format, i.e. a single character selection is denoted by (n,n).
"""
return _richtext.RichTextCtrl_GetInternalSelectionRange(*args, **kwargs)
def SetInternalSelectionRange(*args, **kwargs):
"""
SetInternalSelectionRange(self, RichTextRange range)
Set the selection range in character positions. The range is in
internal format, i.e. a single character selection is denoted by (n,n).
"""
return _richtext.RichTextCtrl_SetInternalSelectionRange(*args, **kwargs)
def AddParagraph(*args, **kwargs):
"""
AddParagraph(self, String text) -> RichTextRange
Add a new paragraph of text to the end of the buffer
"""
return _richtext.RichTextCtrl_AddParagraph(*args, **kwargs)
def AddImage(*args, **kwargs):
"""
AddImage(self, Image image) -> RichTextRange
Add an image
"""
return _richtext.RichTextCtrl_AddImage(*args, **kwargs)
def LayoutContent(*args, **kwargs):
"""
LayoutContent(self, bool onlyVisibleRect=False) -> bool
        Layout the buffer; this must be done before certain operations, such as
        setting the caret position.
"""
return _richtext.RichTextCtrl_LayoutContent(*args, **kwargs)
def MoveCaret(*args, **kwargs):
"""
MoveCaret(self, long pos, bool showAtLineStart=False) -> bool
Move the caret to the given character position
"""
return _richtext.RichTextCtrl_MoveCaret(*args, **kwargs)
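    # Added illustrative helper, not part of the wx API: per the LayoutContent
    # note above, lay the buffer out before positioning and showing the caret.
    def _exampleJumpToEnd(self):
        self.LayoutContent()
        last = self.GetLastPosition()
        self.MoveCaret(last)
        self.ShowPosition(last)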
def MoveRight(*args, **kwargs):
"""
MoveRight(self, int noPositions=1, int flags=0) -> bool
Move right
"""
return _richtext.RichTextCtrl_MoveRight(*args, **kwargs)
def MoveLeft(*args, **kwargs):
"""
MoveLeft(self, int noPositions=1, int flags=0) -> bool
Move left
"""
return _richtext.RichTextCtrl_MoveLeft(*args, **kwargs)
def MoveUp(*args, **kwargs):
"""
MoveUp(self, int noLines=1, int flags=0) -> bool
Move up
"""
return _richtext.RichTextCtrl_MoveUp(*args, **kwargs)
def MoveDown(*args, **kwargs):
"""
MoveDown(self, int noLines=1, int flags=0) -> bool
Move down
"""
return _richtext.RichTextCtrl_MoveDown(*args, **kwargs)
def MoveToLineEnd(*args, **kwargs):
"""
MoveToLineEnd(self, int flags=0) -> bool
Move to the end of the line
"""
return _richtext.RichTextCtrl_MoveToLineEnd(*args, **kwargs)
def MoveToLineStart(*args, **kwargs):
"""
MoveToLineStart(self, int flags=0) -> bool
Move to the start of the line
"""
return _richtext.RichTextCtrl_MoveToLineStart(*args, **kwargs)
def MoveToParagraphEnd(*args, **kwargs):
"""
MoveToParagraphEnd(self, int flags=0) -> bool
Move to the end of the paragraph
"""
return _richtext.RichTextCtrl_MoveToParagraphEnd(*args, **kwargs)
def MoveToParagraphStart(*args, **kwargs):
"""
MoveToParagraphStart(self, int flags=0) -> bool
Move to the start of the paragraph
"""
return _richtext.RichTextCtrl_MoveToParagraphStart(*args, **kwargs)
def MoveHome(*args, **kwargs):
"""
MoveHome(self, int flags=0) -> bool
Move to the start of the buffer
"""
return _richtext.RichTextCtrl_MoveHome(*args, **kwargs)
def MoveEnd(*args, **kwargs):
"""
MoveEnd(self, int flags=0) -> bool
Move to the end of the buffer
"""
return _richtext.RichTextCtrl_MoveEnd(*args, **kwargs)
def PageUp(*args, **kwargs):
"""
PageUp(self, int noPages=1, int flags=0) -> bool
Move n pages up
"""
return _richtext.RichTextCtrl_PageUp(*args, **kwargs)
def PageDown(*args, **kwargs):
"""
PageDown(self, int noPages=1, int flags=0) -> bool
Move n pages down
"""
return _richtext.RichTextCtrl_PageDown(*args, **kwargs)
def WordLeft(*args, **kwargs):
"""
WordLeft(self, int noPages=1, int flags=0) -> bool
Move n words left
"""
return _richtext.RichTextCtrl_WordLeft(*args, **kwargs)
def WordRight(*args, **kwargs):
"""
WordRight(self, int noPages=1, int flags=0) -> bool
Move n words right
"""
return _richtext.RichTextCtrl_WordRight(*args, **kwargs)
def GetBuffer(*args, **kwargs):
"""
GetBuffer(self) -> RichTextBuffer
Returns the buffer associated with the control.
"""
return _richtext.RichTextCtrl_GetBuffer(*args, **kwargs)
def BeginBatchUndo(*args, **kwargs):
"""
        BeginBatchUndo(self, String cmdName) -> bool
        """
        return _richtext.RichTextCtrl_BeginBatchUndo(*args, **kwargs)
"""
Module for managing the installation of DIRAC components:
MySQL, DBs, NoSQL DBs, Services, Agents, Executors and Consumers
It only makes use of defaults in LocalInstallation Section in dirac.cfg
The Following Options are used::
/DIRAC/Setup: Setup to be used for any operation
/LocalInstallation/InstanceName: Name of the Instance for the current Setup (default /DIRAC/Setup)
/LocalInstallation/LogLevel: LogLevel set in "run" script for all components installed
/LocalInstallation/RootPath: Used instead of rootPath in "run" script if defined (if links are used to named versions)
/LocalInstallation/InstancePath: Location where runit and startup directories are created (default rootPath)
/LocalInstallation/UseVersionsDir: DIRAC is installed under versions/<Versioned Directory> with a link from pro
(This option overwrites RootPath and InstancePath)
/LocalInstallation/Host: Used when build the URL to be published for the installed service (default: socket.getfqdn())
/LocalInstallation/RunitDir: Location where runit directory is created (default InstancePath/runit)
/LocalInstallation/StartupDir: Location where startup directory is created (default InstancePath/startup)
/LocalInstallation/MySQLDir: Location where mysql databases are created (default InstancePath/mysql)
/LocalInstallation/Database/User: (default Dirac)
/LocalInstallation/Database/Password: (must be set for SystemAdministrator Service to work)
/LocalInstallation/Database/RootUser: (default root)
/LocalInstallation/Database/RootPwd: (must be set for SystemAdministrator Service to work)
/LocalInstallation/Database/Host: (must be set for SystemAdministrator Service to work)
/LocalInstallation/Database/Port: (default 3306)
/LocalInstallation/Database/MySQLSmallMem: Configure a MySQL with small memory requirements for testing purposes innodb_buffer_pool_size=200MB
/LocalInstallation/Database/MySQLLargeMem: Configure a MySQL with high memory requirements for production purposes innodb_buffer_pool_size=10000MB
/LocalInstallation/NoSQLDatabase/User: (default Dirac)
/LocalInstallation/NoSQLDatabase/Password: (must be set for SystemAdministrator Service to work)
/LocalInstallation/NoSQLDatabase/Host: (must be set for SystemAdministrator Service to work)
/LocalInstallation/NoSQLDatabase/Port: (default 9200)
The setupSite method (used by the dirac-setup-site command) will use the following info::
/LocalInstallation/Systems: List of Systems to be defined for this instance in the CS (default: Configuration, Framework)
/LocalInstallation/Databases: List of MySQL Databases to be installed and configured
/LocalInstallation/Services: List of System/ServiceName to be setup
/LocalInstallation/Agents: List of System/AgentName to be setup
/LocalInstallation/WebPortal: Boolean to setup the Web Portal (default no)
/LocalInstallation/ConfigurationMaster: Boolean, requires Configuration/Server to be given in the list of Services (default: no)
/LocalInstallation/PrivateConfiguration: Boolean, requires Configuration/Server to be given in the list of Services (default: no)
If a Master Configuration Server is being installed the following Options can be used::
/LocalInstallation/ConfigurationName: Name of the Configuration (default: Setup )
/LocalInstallation/AdminUserName: Name of the Admin user (default: None )
/LocalInstallation/AdminUserDN: DN of the Admin user certificate (default: None )
/LocalInstallation/AdminUserEmail: Email of the Admin user (default: None )
/LocalInstallation/AdminGroupName: Name of the Admin group (default: dirac_admin )
/LocalInstallation/HostDN: DN of the host certificate (default: None )
/LocalInstallation/VirtualOrganization: Name of the main Virtual Organization (default: None)
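An illustrative (not prescriptive) LocalInstallation excerpt using some of these
options, with placeholder host names and passwords::
  LocalInstallation
  {
    LogLevel = INFO
    UseVersionsDir = yes
    Database
    {
      User = Dirac
      Password = <to be set>
      RootPwd = <to be set>
      Host = mysql.example.org
      Port = 3306
    }
    Systems = Configuration, Framework
    Services = Configuration/Server, Framework/SystemAdministrator
  }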
"""
import os
import re
import glob
import stat
import time
import shutil
import socket
import DIRAC
from DIRAC import rootPath
from DIRAC import gConfig
from DIRAC import gLogger
from DIRAC.Core.Utilities.Subprocess import systemCall
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
from DIRAC.Core.Utilities.CFG import CFG
from DIRAC.Core.Utilities.Version import getVersion
from DIRAC.Core.Utilities.File import mkDir, mkLink
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
from DIRAC.ConfigurationSystem.Client.Helpers import cfgPath, cfgPathToList, cfgInstallPath, cfgInstallSection, CSGlobals
from DIRAC.Core.Security.Properties import ALARMS_MANAGEMENT, SERVICE_ADMINISTRATOR, \
CS_ADMINISTRATOR, JOB_ADMINISTRATOR, \
FULL_DELEGATION, PROXY_MANAGEMENT, OPERATOR, \
NORMAL_USER, TRUSTED_HOST
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.FrameworkSystem.Client.ComponentMonitoringClient import ComponentMonitoringClient
from DIRAC.FrameworkSystem.Utilities import MonitoringUtilities
from DIRAC.Core.Base.private.ModuleLoader import ModuleLoader
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Base.ExecutorModule import ExecutorModule
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Core.Utilities.PrettyPrint import printTable
from DIRAC.Core.Utilities.Platform import getPlatformString
__RCSID__ = "$Id$"
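# Minimal usage sketch (added for illustration, not part of the original module):
# how a setup script might drive this installer to push a single option into the
# local dirac.cfg.  ComponentInstaller is defined just below; the name is only
# resolved when the function is called.
def _exampleSetLogLevel( level = 'DEBUG' ):
  installer = ComponentInstaller()
  return installer.addOptionToDiracCfg( 'LocalInstallation/LogLevel', level )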
class ComponentInstaller( object ):
def __init__( self ):
self.gDefaultPerms = stat.S_IWUSR | stat.S_IRUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH
# On command line tools this can be set to True to abort after the first error.
self.exitOnError = False
# First some global defaults
gLogger.debug( 'DIRAC Root Path =', rootPath )
self.mysqlMode = ''
self.localCfg = None
self.cfgFile = ''
self.setup = ''
self.instance = ''
self.logLevel = ''
self.linkedRootPath = ''
self.host = ''
self.basePath = ''
self.instancePath = ''
self.runitDir = ''
self.startDir = ''
self.db = {}
self.mysqlDir = ''
self.mysqlDbDir = ''
self.mysqlLogDir = ''
self.mysqlMyOrg = ''
self.mysqlMyCnf = ''
self.mysqlStartupScript = ''
self.mysqlUser = ''
self.mysqlPassword = ''
self.mysqlRootUser = ''
self.mysqlRootPwd = ''
self.mysqlHost = ''
self.mysqlPort = ''
self.mysqlSmallMem = ''
self.mysqlLargeMem = ''
self.noSQLUser = ''
self.noSQLPassword = ''
self.noSQLHost = ''
self.noSQLPort = ''
self.controlDir = ''
self.componentTypes = [ 'service', 'agent', 'executor', 'consumer' ]
self.monitoringClient = None
self.loadDiracCfg()
def loadDiracCfg( self ):
""" Read again defaults from dirac.cfg
"""
from DIRAC.Core.Utilities.Network import getFQDN
self.localCfg = CFG()
self.cfgFile = os.path.join( rootPath, 'etc', 'dirac.cfg' )
try:
self.localCfg.loadFromFile( self.cfgFile )
except Exception:
gLogger.always( "Can't load ", self.cfgFile )
gLogger.always( "Might be OK if setting up the site" )
self.setup = self.localCfg.getOption( cfgPath( 'DIRAC', 'Setup' ), '' )
self.instance = self.localCfg.getOption( cfgInstallPath( 'InstanceName' ), self.setup )
self.logLevel = self.localCfg.getOption( cfgInstallPath( 'LogLevel' ), 'INFO' )
self.linkedRootPath = self.localCfg.getOption( cfgInstallPath( 'RootPath' ), rootPath )
useVersionsDir = self.localCfg.getOption( cfgInstallPath( 'UseVersionsDir' ), False )
self.host = self.localCfg.getOption( cfgInstallPath( 'Host' ), getFQDN() )
self.basePath = os.path.dirname( rootPath )
self.instancePath = self.localCfg.getOption( cfgInstallPath( 'InstancePath' ), rootPath )
if useVersionsDir:
# This option takes precedence
self.instancePath = os.path.dirname( os.path.dirname( rootPath ) )
self.linkedRootPath = os.path.join( self.instancePath, 'pro' )
gLogger.verbose( 'Using Instance Base Dir at', self.instancePath )
self.runitDir = os.path.join( self.instancePath, 'runit' )
self.runitDir = self.localCfg.getOption( cfgInstallPath( 'RunitDir' ), self.runitDir )
gLogger.verbose( 'Using Runit Dir at', self.runitDir )
self.startDir = os.path.join( self.instancePath, 'startup' )
self.startDir = self.localCfg.getOption( cfgInstallPath( 'StartupDir' ), self.startDir )
gLogger.verbose( 'Using Startup Dir at', self.startDir )
self.controlDir = os.path.join( self.instancePath, 'control' )
self.controlDir = self.localCfg.getOption( cfgInstallPath( 'ControlDir' ), self.controlDir )
gLogger.verbose( 'Using Control Dir at', self.controlDir )
# Now some MySQL default values
self.mysqlDir = os.path.join( self.instancePath, 'mysql' )
self.mysqlDir = self.localCfg.getOption( cfgInstallPath( 'MySQLDir' ), self.mysqlDir )
gLogger.verbose( 'Using MySQL Dir at', self.mysqlDir )
self.mysqlDbDir = os.path.join( self.mysqlDir, 'db' )
self.mysqlLogDir = os.path.join( self.mysqlDir, 'log' )
self.mysqlMyOrg = os.path.join( rootPath, 'mysql', 'etc', 'my.cnf' )
self.mysqlMyCnf = os.path.join( self.mysqlDir, '.my.cnf' )
self.mysqlStartupScript = os.path.join( rootPath, 'mysql', 'share', 'mysql', 'mysql.server' )
self.mysqlRootPwd = self.localCfg.getOption( cfgInstallPath( 'Database', 'RootPwd' ), self.mysqlRootPwd )
if self.mysqlRootPwd:
gLogger.verbose( 'Reading Root MySQL Password from local configuration' )
else:
gLogger.warn( 'MySQL root password not found' )
self.mysqlUser = self.localCfg.getOption( cfgInstallPath( 'Database', 'User' ), self.mysqlUser )
if self.mysqlUser:
gLogger.verbose( 'Reading MySQL User from local configuration' )
else:
gLogger.warn( "Using 'Dirac' as MySQL user name" )
self.mysqlUser = 'Dirac'
self.mysqlPassword = self.localCfg.getOption( cfgInstallPath( 'Database', 'Password' ), self.mysqlPassword )
if self.mysqlPassword:
gLogger.verbose( 'Reading %s MySQL Password from local configuration ' % self.mysqlUser )
else:
gLogger.warn( 'MySQL password not found' )
self.mysqlHost = self.localCfg.getOption( cfgInstallPath( 'Database', 'Host' ), '' )
if self.mysqlHost:
gLogger.verbose( 'Using MySQL Host from local configuration', self.mysqlHost )
else:
gLogger.warn( 'Using the same host for MySQL as dirac services' )
self.mysqlHost = self.host
self.mysqlPort = self.localCfg.getOption( cfgInstallPath( 'Database', 'Port' ), 0 )
if self.mysqlPort:
gLogger.verbose( 'Using MySQL Port from local configuration ', self.mysqlPort )
else:
gLogger.warn( "Using port '3306' as MySQL port" )
self.mysqlPort = 3306
self.mysqlRootUser = self.localCfg.getOption( cfgInstallPath( 'Database', 'RootUser' ), '' )
if self.mysqlRootUser:
gLogger.verbose( 'Using MySQL root user from local configuration ', self.mysqlRootUser )
else:
gLogger.warn( "Using 'root' as root MySQL user" )
self.mysqlRootUser = 'root'
self.mysqlMode = self.localCfg.getOption( cfgInstallPath( 'Database', 'MySQLMode' ), '' )
if self.mysqlMode:
gLogger.verbose( 'Configuring MySQL server as %s' % self.mysqlMode )
self.mysqlSmallMem = self.localCfg.getOption( cfgInstallPath( 'Database', 'MySQLSmallMem' ), False )
if self.mysqlSmallMem:
gLogger.verbose( 'Configuring MySQL server for Low Memory usage' )
self.mysqlLargeMem = self.localCfg.getOption( cfgInstallPath( 'Database', 'MySQLLargeMem' ), False )
if self.mysqlLargeMem:
gLogger.verbose( 'Configuring MySQL server for Large Memory usage' )
# Now some noSQL defaults
self.noSQLUser = self.localCfg.getOption( cfgInstallPath( 'NoSQLDatabase', 'User' ), self.noSQLUser )
if self.noSQLUser:
gLogger.verbose( 'Reading NoSQL User from local configuration' )
else:
gLogger.warn( 'Using default NoSQL User' )
self.noSQLUser = 'Dirac'
self.noSQLPassword = self.localCfg.getOption( cfgInstallPath( 'NoSQLDatabase', 'Password' ), self.noSQLPassword )
if self.noSQLPassword:
gLogger.verbose( 'Reading %s NoSQL Password from local configuration ' % self.noSQLUser )
else:
gLogger.warn( 'NoSQL password not found' )
self.noSQLHost = self.localCfg.getOption( cfgInstallPath( 'NoSQLDatabase', 'Host' ), '' )
if self.noSQLHost:
gLogger.verbose( 'Using NoSQL Host from local configuration', self.noSQLHost )
else:
gLogger.warn( 'Using the same host for NoSQL as dirac services' )
self.noSQLHost = self.host
self.noSQLPort = self.localCfg.getOption( cfgInstallPath( 'NoSQLDatabase', 'Port' ), 0 )
if self.noSQLPort:
gLogger.verbose( 'Using NoSQL Port from local configuration ', self.noSQLPort )
else:
gLogger.warn( 'Using the default port 9200' )
self.noSQLPort = 9200
# Now ready to insert components in the Component Monitoring DB
self.monitoringClient = ComponentMonitoringClient()
gLogger.verbose( 'Client configured for Component Monitoring' )
def getInfo( self ):
result = getVersion()
if not result['OK']:
return result
rDict = result['Value']
if self.setup:
rDict['Setup'] = self.setup
else:
rDict['Setup'] = 'Unknown'
return S_OK( rDict )
def getExtensions( self ):
"""
Get the list of installed extensions
"""
initList = glob.glob( os.path.join( rootPath, '*DIRAC', '__init__.py' ) )
extensions = [ os.path.basename( os.path.dirname( k ) ) for k in initList]
try:
extensions.remove( 'DIRAC' )
except Exception:
error = 'DIRAC is not properly installed'
gLogger.exception( error )
if self.exitOnError:
DIRAC.exit( -1 )
return S_ERROR( error )
return S_OK( extensions )
def _addCfgToDiracCfg( self, cfg ):
"""
Merge cfg into existing dirac.cfg file
"""
if str( self.localCfg ):
newCfg = self.localCfg.mergeWith( cfg )
else:
newCfg = cfg
result = newCfg.writeToFile( self.cfgFile )
if not result:
return result
self.loadDiracCfg()
return result
def _addCfgToCS( self, cfg ):
"""
Merge cfg into central CS
"""
cfgClient = CSAPI()
result = cfgClient.downloadCSData()
if not result['OK']:
return result
result = cfgClient.mergeFromCFG( cfg )
if not result['OK']:
return result
result = cfgClient.commit()
return result
def _addCfgToLocalCS( self, cfg ):
"""
Merge cfg into local CS
"""
csName = self.localCfg.getOption( cfgPath( 'DIRAC', 'Configuration', 'Name' ) , '' )
if not csName:
error = 'Missing %s' % cfgPath( 'DIRAC', 'Configuration', 'Name' )
if self.exitOnError:
gLogger.error( error )
DIRAC.exit( -1 )
return S_ERROR( error )
csCfg = CFG()
csFile = os.path.join( rootPath, 'etc', '%s.cfg' % csName )
if os.path.exists( csFile ):
csCfg.loadFromFile( csFile )
if str( csCfg ):
newCfg = csCfg.mergeWith( cfg )
else:
newCfg = cfg
return newCfg.writeToFile( csFile )
def _removeOptionFromCS( self, path ):
"""
Delete options from central CS
"""
cfgClient = CSAPI()
result = cfgClient.downloadCSData()
if not result['OK']:
return result
result = cfgClient.delOption( path )
if not result['OK']:
return result
result = cfgClient.commit()
return result
def _removeSectionFromCS( self, path ):
"""
    Delete sections from the central CS
"""
cfgClient = CSAPI()
result = cfgClient.downloadCSData()
if not result['OK']:
return result
result = cfgClient.delSection( path )
if not result['OK']:
return result
result = cfgClient.commit()
return result
def _getCentralCfg( self, installCfg ):
"""
Create the skeleton of central Cfg for an initial Master CS
"""
# First copy over from installation cfg
centralCfg = CFG()
# DIRAC/Extensions
extensions = self.localCfg.getOption( cfgInstallPath( 'Extensions' ), [] )
while 'Web' in list( extensions ):
extensions.remove( 'Web' )
centralCfg.createNewSection( 'DIRAC', '' )
if extensions:
centralCfg['DIRAC'].addKey( 'Extensions', ','.join( extensions ), '' ) #pylint: disable=no-member
vo = self.localCfg.getOption( cfgInstallPath( 'VirtualOrganization' ), '' )
if vo:
centralCfg['DIRAC'].addKey( 'VirtualOrganization', vo, '' ) #pylint: disable=no-member
for section in [ 'Systems', 'Resources',
'Resources/Sites', 'Resources/Sites/DIRAC',
'Resources/Sites/LCG', 'Operations', 'Registry' ]:
if installCfg.isSection( section ):
centralCfg.createNewSection( section, contents = installCfg[section] )
# Now try to add things from the Installation section
# Registry
adminUserName = self.localCfg.getOption( cfgInstallPath( 'AdminUserName' ), '' )
adminUserDN = self.localCfg.getOption( cfgInstallPath( 'AdminUserDN' ), '' )
adminUserEmail = self.localCfg.getOption( cfgInstallPath( 'AdminUserEmail' ), '' )
adminGroupName = self.localCfg.getOption( cfgInstallPath( 'AdminGroupName' ), 'dirac_admin' )
hostDN = self.localCfg.getOption( cfgInstallPath( 'HostDN' ), '' )
defaultGroupName = self.localCfg.getOption( cfgInstallPath( 'DefaultGroupName' ), 'dirac_user' )
adminGroupProperties = [ ALARMS_MANAGEMENT, SERVICE_ADMINISTRATOR,
CS_ADMINISTRATOR, JOB_ADMINISTRATOR,
FULL_DELEGATION, PROXY_MANAGEMENT, OPERATOR ]
defaultGroupProperties = [ NORMAL_USER ]
defaultHostProperties = [ TRUSTED_HOST, CS_ADMINISTRATOR,
JOB_ADMINISTRATOR, FULL_DELEGATION,
PROXY_MANAGEMENT, OPERATOR ]
for section in ( cfgPath( 'Registry' ),
cfgPath( 'Registry', 'Users' ),
cfgPath( 'Registry', 'Groups' ),
cfgPath( 'Registry', 'Hosts' ) ):
if not centralCfg.isSection( section ):
centralCfg.createNewSection( section )
if adminUserName:
if not ( adminUserDN and adminUserEmail ):
        gLogger.error( 'AdminUserName is given but DN or Email is missing, it will not be configured' )
else:
for section in [ cfgPath( 'Registry', 'Users', adminUserName ),
cfgPath( 'Registry', 'Groups', defaultGroupName ),
cfgPath( 'Registry', 'Groups', adminGroupName ) ]:
if not centralCfg.isSection( section ):
centralCfg.createNewSection( section )
if centralCfg['Registry'].existsKey( 'DefaultGroup' ): #pylint: disable=unsubscriptable-object,no-member
centralCfg['Registry'].deleteKey( 'DefaultGroup' ) #pylint: disable=unsubscriptable-object,no-member
centralCfg['Registry'].addKey( 'DefaultGroup', defaultGroupName, '' ) #pylint: disable=unsubscriptable-object,no-member
if centralCfg['Registry']['Users'][adminUserName].existsKey( 'DN' ): #pylint: disable=unsubscriptable-object
centralCfg['Registry']['Users'][adminUserName].deleteKey( 'DN' ) #pylint: disable=unsubscriptable-object
centralCfg['Registry']['Users'][adminUserName].addKey( 'DN', adminUserDN, '' ) #pylint: disable=unsubscriptable-object
if centralCfg['Registry']['Users'][adminUserName].existsKey( 'Email' ): #pylint: disable=unsubscriptable-object
centralCfg['Registry']['Users'][adminUserName].deleteKey( 'Email' ) #pylint: disable=unsubscriptable-object
centralCfg['Registry']['Users'][adminUserName].addKey( 'Email' , adminUserEmail, '' ) #pylint: disable=unsubscriptable-object
# Add Admin User to Admin Group and default group
for group in [adminGroupName, defaultGroupName]:
if not centralCfg['Registry']['Groups'][group].isOption( 'Users' ): #pylint: disable=unsubscriptable-object
centralCfg['Registry']['Groups'][group].addKey( 'Users', '', '' ) #pylint: disable=unsubscriptable-object
users = centralCfg['Registry']['Groups'][group].getOption( 'Users', [] ) #pylint: disable=unsubscriptable-object
if adminUserName not in users:
centralCfg['Registry']['Groups'][group].appendToOption( 'Users', ', %s' % adminUserName ) #pylint: disable=unsubscriptable-object
if not centralCfg['Registry']['Groups'][group].isOption( 'Properties' ): #pylint: disable=unsubscriptable-object
centralCfg['Registry']['Groups'][group].addKey( 'Properties', '', '' ) #pylint: disable=unsubscriptable-object
properties = centralCfg['Registry']['Groups'][adminGroupName].getOption( 'Properties', [] ) #pylint: disable=unsubscriptable-object
for prop in adminGroupProperties:
if prop not in properties:
properties.append( prop )
centralCfg['Registry']['Groups'][adminGroupName].appendToOption( 'Properties', ', %s' % prop ) #pylint: disable=unsubscriptable-object
properties = centralCfg['Registry']['Groups'][defaultGroupName].getOption( 'Properties', [] ) #pylint: disable=unsubscriptable-object
for prop in defaultGroupProperties:
if prop not in properties:
properties.append( prop )
centralCfg['Registry']['Groups'][defaultGroupName].appendToOption( 'Properties', ', %s' % prop ) #pylint: disable=unsubscriptable-object
# Add the master Host description
if hostDN:
hostSection = cfgPath( 'Registry', 'Hosts', self.host )
if not centralCfg.isSection( hostSection ):
centralCfg.createNewSection( hostSection )
if centralCfg['Registry']['Hosts'][self.host].existsKey( 'DN' ): #pylint: disable=unsubscriptable-object
centralCfg['Registry']['Hosts'][self.host].deleteKey( 'DN' ) #pylint: disable=unsubscriptable-object
centralCfg['Registry']['Hosts'][self.host].addKey( 'DN', hostDN, '' ) #pylint: disable=unsubscriptable-object
if not centralCfg['Registry']['Hosts'][self.host].isOption( 'Properties' ): #pylint: disable=unsubscriptable-object
centralCfg['Registry']['Hosts'][self.host].addKey( 'Properties', '', '' ) #pylint: disable=unsubscriptable-object
properties = centralCfg['Registry']['Hosts'][self.host].getOption( 'Properties', [] ) #pylint: disable=unsubscriptable-object
for prop in defaultHostProperties:
if prop not in properties:
properties.append( prop )
centralCfg['Registry']['Hosts'][self.host].appendToOption( 'Properties', ', %s' % prop ) #pylint: disable=unsubscriptable-object
# Operations
if adminUserEmail:
operationsCfg = self.__getCfg( cfgPath( 'Operations', 'Defaults', 'EMail' ), 'Production', adminUserEmail )
centralCfg = centralCfg.mergeWith( operationsCfg )
operationsCfg = self.__getCfg( cfgPath( 'Operations', 'Defaults', 'EMail' ), 'Logging', adminUserEmail )
centralCfg = centralCfg.mergeWith( operationsCfg )
return centralCfg
def __getCfg( self, section, option = '', value = '' ):
"""
Create a new Cfg with given info
"""
if not section:
return None
cfg = CFG()
sectionList = []
for sect in cfgPathToList( section ):
if not sect:
continue
sectionList.append( sect )
cfg.createNewSection( cfgPath( *sectionList ) )
if not sectionList:
return None
if option and value:
sectionList.append( option )
cfg.setOption( cfgPath( *sectionList ), value )
return cfg
def addOptionToDiracCfg( self, option, value ):
"""
Add Option to dirac.cfg
"""
optionList = cfgPathToList( option )
optionName = optionList[-1]
section = cfgPath( *optionList[:-1] )
cfg = self.__getCfg( section, optionName, value )
if not cfg:
return S_ERROR( 'Wrong option: %s = %s' % ( option, value ) )
if self._addCfgToDiracCfg( cfg ):
return S_OK()
return S_ERROR( 'Could not merge %s=%s with local configuration' % ( option, value ) )
def removeComponentOptionsFromCS( self, system, component, mySetup = None ):
"""
Remove the section with Component options from the CS, if possible
"""
if mySetup is None:
mySetup = self.setup
result = self.monitoringClient.getInstallations( { 'UnInstallationTime': None, 'Instance': component },
{ 'System': system },
{}, True )
if not result[ 'OK' ]:
return result
installations = result[ 'Value' ]
instanceOption = cfgPath( 'DIRAC', 'Setups', mySetup, system )
if gConfig:
compInstance = gConfig.getValue( instanceOption, '' )
else:
compInstance = self.localCfg.getOption( instanceOption, '' )
if len( installations ) == 1:
remove = True
removeMain = False
installation = installations[0]
cType = installation[ 'Component' ][ 'Type' ]
# Is the component a rename of another module?
if installation[ 'Instance' ] == installation[ 'Component' ][ 'Module' ]:
isRenamed = False
else:
isRenamed = True
result = self.monitoringClient.getInstallations( { 'UnInstallationTime': None },
{ 'System': system, 'Module': installation[ 'Component' ][ 'Module' ] },
{}, True )
if not result[ 'OK' ]:
return result
installations = result[ 'Value' ]
# If the component is not renamed we keep it in the CS if there are any renamed ones
if not isRenamed:
if len( installations ) > 1:
remove = False
# If the component is renamed and is the last one, we remove the entry for the main module as well
else:
if len( installations ) == 1:
removeMain = True
if remove:
result = self._removeSectionFromCS( cfgPath( 'Systems', system,
compInstance,
installation[ 'Component' ][ 'Type' ].title() + 's', component ) )
if not result[ 'OK' ]:
return result
if not isRenamed and cType == 'service':
result = self._removeOptionFromCS( cfgPath( 'Systems', system, compInstance, 'URLs', component ) )
if not result[ 'OK' ]:
# It is maybe in the FailoverURLs ?
result = self._removeOptionFromCS( cfgPath( 'Systems', system, compInstance, 'FailoverURLs', component ) )
if not result['OK']:
return result
if removeMain:
result = self._removeSectionFromCS( cfgPath( 'Systems', system,
compInstance,
installation[ 'Component' ][ 'Type' ].title() + 's',
installation[ 'Component' ][ 'Module' ] ) )
if not result[ 'OK' ]:
return result
if cType == 'service':
result = self._removeOptionFromCS( cfgPath( 'Systems', system, compInstance, 'URLs', installation[ 'Component' ][ 'Module' ] ) )
if not result[ 'OK' ]:
# it is maybe in the FailoverURLs ?
result = self._removeOptionFromCS( cfgPath( 'Systems', system, compInstance, 'FailoverURLs', installation[ 'Component' ][ 'Module' ] ) )
if not result['OK']:
return result
return S_OK( 'Successfully removed entries from CS' )
return S_OK( 'Instances of this component still exist. It won\'t be completely removed' )
def addDefaultOptionsToCS( self, gConfig_o, componentType, systemName,
component, extensions, mySetup = None,
specialOptions = {}, overwrite = False,
addDefaultOptions = True ):
"""
Add the section with the component options to the CS
"""
if mySetup is None:
mySetup = self.setup
if gConfig_o:
gConfig_o.forceRefresh()
system = systemName.replace( 'System', '' )
instanceOption = cfgPath( 'DIRAC', 'Setups', mySetup, system )
if gConfig_o:
compInstance = gConfig_o.getValue( instanceOption, '' )
else:
compInstance = self.localCfg.getOption( instanceOption, '' )
if not compInstance:
return S_ERROR( '%s not defined in %s' % ( instanceOption, self.cfgFile ) )
result = self._getSectionName( componentType )
if not result[ 'OK' ]:
return result
sectionName = result[ 'Value' ]
# Check if the component CS options exist
addOptions = True
componentSection = cfgPath( 'Systems', system, compInstance, sectionName, component )
if not overwrite:
if gConfig_o:
result = gConfig_o.getOptions( componentSection )
if result['OK']:
addOptions = False
if not addOptions:
return S_OK( 'Component options already exist' )
# Add the component options now
result = self.getComponentCfg( componentType, system, component, compInstance, extensions, specialOptions, addDefaultOptions )
if not result['OK']:
return result
compCfg = result['Value']
gLogger.notice( 'Adding to CS', '%s %s/%s' % ( componentType, system, component ) )
resultAddToCFG = self._addCfgToCS( compCfg )
if componentType == 'executor':
# Is it a container ?
execList = compCfg.getOption( '%s/Load' % componentSection, [] )
for element in execList:
result = self.addDefaultOptionsToCS( gConfig_o, componentType, systemName, element, extensions, self.setup,
{}, overwrite )
resultAddToCFG.setdefault( 'Modules', {} )
resultAddToCFG['Modules'][element] = result['OK']
return resultAddToCFG
def addDefaultOptionsToComponentCfg( self, componentType, systemName, component, extensions ):
"""
Add default component options local component cfg
"""
system = systemName.replace( 'System', '' )
instanceOption = cfgPath( 'DIRAC', 'Setups', self.setup, system )
compInstance = self.localCfg.getOption( instanceOption, '' )
if not compInstance:
return S_ERROR( '%s not defined in %s' % ( instanceOption, self.cfgFile ) )
# Add the component options now
result = self.getComponentCfg( componentType, system, component, compInstance, extensions )
if not result['OK']:
return result
compCfg = result['Value']
compCfgFile = os.path.join( rootPath, 'etc', '%s_%s.cfg' % ( system, component ) )
return compCfg.writeToFile( compCfgFile )
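  # For example (added comment, names are assumptions): for system 'Framework' and component
  # 'SystemAdministrator' this writes etc/Framework_SystemAdministrator.cfg under rootPath.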
def addCfgToComponentCfg( self, componentType, systemName, component, cfg ):
"""
Add some extra configuration to the local component cfg
"""
result = self._getSectionName( componentType )
if not result[ 'OK' ]:
return result
sectionName = result[ 'Value' ]
if not cfg:
return S_OK()
system = systemName.replace( 'System', '' )
instanceOption = cfgPath( 'DIRAC', 'Setups', self.setup, system )
compInstance = self.localCfg.getOption( instanceOption, '' )
if not compInstance:
return S_ERROR( '%s not defined in %s' % ( instanceOption, self.cfgFile ) )
compCfgFile = os.path.join( rootPath, 'etc', '%s_%s.cfg' % ( system, component ) )
compCfg = CFG()
if os.path.exists( compCfgFile ):
compCfg.loadFromFile( compCfgFile )
sectionPath = cfgPath( 'Systems', system, compInstance, sectionName )
newCfg = self.__getCfg( sectionPath )
newCfg.createNewSection( cfgPath( sectionPath, component ), 'Added by ComponentInstaller', cfg )
if newCfg.writeToFile( compCfgFile ):
return S_OK( compCfgFile )
error = 'Can not write %s' % compCfgFile
gLogger.error( error )
return S_ERROR( error )
def getComponentCfg( self, componentType, system, component, compInstance, extensions,
specialOptions = {}, addDefaultOptions = True ):
"""
Get the CFG object of the component configuration
"""
result = self._getSectionName( componentType )
if not result[ 'OK' ]:
return result
sectionName = result[ 'Value' ]
componentModule = component
if "Module" in specialOptions and specialOptions[ 'Module' ]:
componentModule = specialOptions['Module']
compCfg = CFG()
if addDefaultOptions:
extensionsDIRAC = [ x + 'DIRAC' for x in extensions ] + extensions
for ext in extensionsDIRAC + ['DIRAC']:
cfgTemplatePath = os.path.join( rootPath, ext, '%sSystem' % system, 'ConfigTemplate.cfg' )
if os.path.exists( cfgTemplatePath ):
gLogger.notice( 'Loading configuration template', cfgTemplatePath )
# Look up the component in this template
loadCfg = CFG()
loadCfg.loadFromFile( cfgTemplatePath )
compCfg = loadCfg.mergeWith( compCfg )
compPath = cfgPath( sectionName, componentModule )
if not compCfg.isSection( compPath ):
error = 'Can not find %s in template' % compPath
gLogger.error( error )
if self.exitOnError:
DIRAC.exit( -1 )
return S_ERROR( error )
compCfg = compCfg[sectionName][componentModule] #pylint: disable=unsubscriptable-object
# Delete Dependencies section if any
compCfg.deleteKey( 'Dependencies' )
sectionPath = cfgPath( 'Systems', system, compInstance, sectionName )
cfg = self.__getCfg( sectionPath )
cfg.createNewSection( cfgPath( sectionPath, component ), '', compCfg )
for option, value in specialOptions.items():
cfg.setOption( cfgPath( sectionPath, component, option ), value )
# Add the service URL
if componentType == "service":
port = compCfg.getOption( 'Port' , 0 )
if port and self.host:
urlsPath = cfgPath( 'Systems', system, compInstance, 'URLs' )
cfg.createNewSection( urlsPath )
failoverUrlsPath = cfgPath( 'Systems', system, compInstance, 'FailoverURLs' )
cfg.createNewSection( failoverUrlsPath )
cfg.setOption( cfgPath( urlsPath, component ),
'dips://%s:%d/%s/%s' % ( self.host, port, system, component ) )
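        # The resulting option looks like (added comment, host and port are assumptions):
        #   Systems/Framework/Production/URLs/SystemAdministrator = dips://host.example.org:9162/Framework/SystemAdministrator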
return S_OK( cfg )
def addDatabaseOptionsToCS( self, gConfig_o, systemName, dbName, mySetup = None, overwrite = False ):
"""
Add the section with the database options to the CS
"""
if mySetup is None:
mySetup = self.setup
if gConfig_o:
gConfig_o.forceRefresh()
system = systemName.replace( 'System', '' )
instanceOption = cfgPath( 'DIRAC', 'Setups', mySetup, system )
if gConfig_o:
compInstance = gConfig_o.getValue( instanceOption, '' )
else:
compInstance = self.localCfg.getOption( instanceOption, '' )
if not compInstance:
return S_ERROR( '%s not defined in %s' % ( instanceOption, self.cfgFile ) )
# Check if the component CS options exist
addOptions = True
if not overwrite:
databasePath = cfgPath( 'Systems', system, compInstance, 'Databases', dbName )
result = gConfig_o.getOptions( databasePath )
if result['OK']:
addOptions = False
if not addOptions:
return S_OK( 'Database options already exist' )
# Add the component options now
result = self.getDatabaseCfg( system, dbName, compInstance )
if not result['OK']:
return result
databaseCfg = result['Value']
gLogger.notice( 'Adding to CS', '%s/%s' % ( system, dbName ) )
return self._addCfgToCS( databaseCfg )
def removeDatabaseOptionsFromCS( self, gConfig_o, system, dbName, mySetup = None ):
"""
Remove the section with database options from the CS, if possible
"""
if mySetup is None:
mySetup = self.setup
result = self.monitoringClient.installationExists( { 'UnInstallationTime': None },
{ 'System': system, 'Type': 'DB', 'Module': dbName },
{} )
if not result[ 'OK' ]:
return result
exists = result[ 'Value' ]
instanceOption = cfgPath( 'DIRAC', 'Setups', mySetup, system )
if gConfig_o:
compInstance = gConfig_o.getValue( instanceOption, '' )
else:
compInstance = self.localCfg.getOption( instanceOption, '' )
if not exists:
result = self._removeSectionFromCS( cfgPath( 'Systems', system, compInstance, 'Databases', dbName ) )
if not result[ 'OK' ]:
return result
return S_OK( 'Successfully removed entries from CS' )
def getDatabaseCfg( self, system, dbName, compInstance ):
"""
Get the CFG object of the database configuration
"""
databasePath = cfgPath( 'Systems', system, compInstance, 'Databases', dbName )
cfg = self.__getCfg( databasePath, 'DBName', dbName )
cfg.setOption( cfgPath( databasePath, 'Host' ), self.mysqlHost )
cfg.setOption( cfgPath( databasePath, 'Port' ), self.mysqlPort )
return S_OK( cfg )
def addSystemInstance( self, systemName, compInstance, mySetup = None, myCfg = False ):
"""
    Add a new system instance to dirac.cfg and CS
"""
if mySetup is None:
mySetup = self.setup
system = systemName.replace( 'System', '' )
    gLogger.notice( 'Adding %s system as %s instance for %s setup to dirac.cfg and CS' % ( system, compInstance, mySetup ) )
cfg = self.__getCfg( cfgPath( 'DIRAC', 'Setups', mySetup ), system, compInstance )
if myCfg:
if not self._addCfgToDiracCfg( cfg ):
        return S_ERROR( 'Failed to add system instance to dirac.cfg' )
return self._addCfgToCS( cfg )
def printStartupStatus( self, rDict ):
"""
Print in nice format the return dictionary from self.getStartupComponentStatus
(also returned by self.runsvctrlComponent)
"""
fields = ['Name', 'Runit', 'Uptime', 'PID']
records = []
try:
for comp in rDict:
records.append( [comp,
rDict[comp]['RunitStatus'],
rDict[comp]['Timeup'],
str( rDict[comp]['PID'] ) ] )
printTable( fields, records )
except Exception as x:
print "Exception while gathering data for printing: %s" % str( x )
return S_OK()
def printOverallStatus( self, rDict ):
"""
Print in nice format the return dictionary from self.getOverallStatus
"""
fields = ['System', 'Name', 'Type', 'Setup', 'Installed', 'Runit', 'Uptime', 'PID']
records = []
try:
for compType in rDict:
for system in rDict[compType]:
for component in rDict[compType][system]:
record = [ system, component, compType.lower()[:-1] ]
if rDict[compType][system][component]['Setup']:
record.append( 'SetUp' )
else:
record.append( 'NotSetUp' )
if rDict[compType][system][component]['Installed']:
record.append( 'Installed' )
else:
record.append( 'NotInstalled' )
record.append( str( rDict[compType][system][component]['RunitStatus'] ) )
record.append( str( rDict[compType][system][component]['Timeup'] ) )
record.append( str( rDict[compType][system][component]['PID'] ) )
records.append( record )
printTable( fields, records )
except Exception as x:
print "Exception while gathering data for printing: %s" % str( x )
return S_OK()
def getAvailableSystems( self, extensions ):
"""
Get the list of all systems (in all given extensions) locally available
"""
systems = []
for extension in extensions:
extensionPath = os.path.join( DIRAC.rootPath, extension, '*System' )
for system in [ os.path.basename( k ).split( 'System' )[0] for k in glob.glob( extensionPath ) ]:
if system not in systems:
systems.append( system )
return systems
def getSoftwareComponents( self, extensions ):
"""
Get the list of all the components ( services and agents ) for which the software
is installed on the system
"""
# The Gateway does not need a handler
services = { 'Framework' : ['Gateway'] }
agents = {}
executors = {}
remainders = {}
resultDict = {}
remainingTypes = [ cType for cType in self.componentTypes if cType not in [ 'service', 'agent', 'executor' ] ]
resultIndexes = {}
# Components other than services, agents and executors
for cType in remainingTypes:
result = self._getSectionName( cType )
if not result[ 'OK' ]:
return result
resultIndexes[ cType ] = result[ 'Value' ]
resultDict[ resultIndexes[ cType ] ] = {}
remainders[ cType ] = {}
for extension in ['DIRAC'] + [ x + 'DIRAC' for x in extensions]:
if not os.path.exists( os.path.join( rootPath, extension ) ):
        # Not all the extensions are necessarily installed in this instance
continue
systemList = os.listdir( os.path.join( rootPath, extension ) )
for sys in systemList:
system = sys.replace( 'System', '' )
try:
agentDir = os.path.join( rootPath, extension, sys, 'Agent' )
agentList = os.listdir( agentDir )
for agent in agentList:
if os.path.splitext( agent )[1] == ".py":
agentFile = os.path.join( agentDir, agent )
with open( agentFile, 'r' ) as afile:
body = afile.read()
if body.find( 'AgentModule' ) != -1 or body.find( 'OptimizerModule' ) != -1:
if not agents.has_key( system ):
agents[system] = []
agents[system].append( agent.replace( '.py', '' ) )
except OSError:
pass
try:
serviceDir = os.path.join( rootPath, extension, sys, 'Service' )
serviceList = os.listdir( serviceDir )
for service in serviceList:
if service.find( 'Handler' ) != -1 and os.path.splitext( service )[1] == '.py':
if not services.has_key( system ):
services[system] = []
if system == 'Configuration' and service == 'ConfigurationHandler.py':
service = 'ServerHandler.py'
services[system].append( service.replace( '.py', '' ).replace( 'Handler', '' ) )
except OSError:
pass
try:
executorDir = os.path.join( rootPath, extension, sys, 'Executor' )
executorList = os.listdir( executorDir )
for executor in executorList:
if os.path.splitext( executor )[1] == ".py":
executorFile = os.path.join( executorDir, executor )
with open( executorFile, 'r' ) as afile:
body = afile.read()
if body.find( 'OptimizerExecutor' ) != -1:
if not executors.has_key( system ):
executors[system] = []
executors[system].append( executor.replace( '.py', '' ) )
except OSError:
pass
# Rest of component types
for cType in remainingTypes:
try:
remainDir = os.path.join( rootPath, extension, sys, cType.title() )
remainList = os.listdir( remainDir )
for remainder in remainList:
if os.path.splitext( remainder )[1] == ".py":
if not remainders[ cType ].has_key( system ):
remainders[ cType ][system] = []
remainders[ cType ][system].append( remainder.replace( '.py', '' ) )
except OSError:
pass
resultDict['Services'] = services
resultDict['Agents'] = agents
resultDict['Executors'] = executors
for cType in remainingTypes:
resultDict[ resultIndexes[ cType ] ] = remainders[ cType ]
return S_OK( resultDict )
def getInstalledComponents( self ):
"""
Get the list of all the components ( services and agents )
installed on the system in the runit directory
"""
resultDict = {}
resultIndexes = {}
for cType in self.componentTypes:
result = self._getSectionName( cType )
if not result[ 'OK' ]:
return result
resultIndexes[ cType ] = result[ 'Value' ]
resultDict[ resultIndexes[ cType ] ] = {}
systemList = os.listdir( self.runitDir )
for system in systemList:
systemDir = os.path.join( self.runitDir, system )
components = os.listdir( systemDir )
for component in components:
try:
runFile = os.path.join( systemDir, component, 'run' )
rfile = open( runFile, 'r' )
body = rfile.read()
rfile.close()
for cType in self.componentTypes:
if body.find( 'dirac-%s' % ( cType ) ) != -1:
if not resultDict[ resultIndexes[ cType ] ].has_key( system ):
resultDict[ resultIndexes[ cType ] ][system] = []
resultDict[ resultIndexes[ cType ] ][system].append( component )
except IOError:
pass
return S_OK( resultDict )
def getSetupComponents( self ):
"""
Get the list of all the components ( services and agents )
set up for running with runsvdir in startup directory
"""
resultDict = {}
resultIndexes = {}
for cType in self.componentTypes:
result = self._getSectionName( cType )
if not result[ 'OK' ]:
return result
resultIndexes[ cType ] = result[ 'Value' ]
resultDict[ resultIndexes[ cType ] ] = {}
if not os.path.isdir( self.startDir ):
      return S_ERROR( 'Startup Directory does not exist: %s' % self.startDir )
componentList = os.listdir( self.startDir )
for component in componentList:
try:
runFile = os.path.join( self.startDir, component, 'run' )
rfile = open( runFile, 'r' )
body = rfile.read()
rfile.close()
for cType in self.componentTypes:
if body.find( 'dirac-%s' % ( cType ) ) != -1:
system, compT = component.split( '_' )[0:2]
if not resultDict[ resultIndexes[ cType ] ].has_key( system ):
resultDict[ resultIndexes[ cType ] ][system] = []
resultDict[ resultIndexes[ cType ] ][system].append( compT )
except IOError:
pass
return S_OK( resultDict )
def getStartupComponentStatus( self, componentTupleList ):
"""
Get the list of all the components ( services and agents )
set up for running with runsvdir in startup directory
"""
try:
if componentTupleList:
cList = []
for componentTuple in componentTupleList:
cList.extend( glob.glob( os.path.join( self.startDir, '_'.join( componentTuple ) ) ) )
else:
cList = glob.glob( os.path.join( self.startDir, '*' ) )
except Exception:
error = 'Failed to parse List of Components'
gLogger.exception( error )
if self.exitOnError:
DIRAC.exit( -1 )
return S_ERROR( error )
result = self.execCommand( 0, ['runsvstat'] + cList )
if not result['OK']:
return result
output = result['Value'][1].strip().split( '\n' )
componentDict = {}
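    # Each runsvstat output line is expected to look roughly like (added comment, values are illustrative):
    #   <startDir>/Framework_SystemAdministrator: run (pid 12345) 67890 seconds
    # or "<name>: down 42 seconds"; the parsing below relies on this format.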
for line in output:
if not line:
continue
cname, routput = line.split( ':' )
cname = cname.replace( '%s/' % self.startDir, '' )
run = False
reResult = re.search( '^ run', routput )
if reResult:
run = True
down = False
reResult = re.search( '^ down', routput )
if reResult:
down = True
reResult = re.search( '([0-9]+) seconds', routput )
timeup = 0
if reResult:
timeup = reResult.group( 1 )
reResult = re.search( 'pid ([0-9]+)', routput )
pid = 0
if reResult:
pid = reResult.group( 1 )
runsv = "Not running"
if run or down:
runsv = "Running"
reResult = re.search( 'runsv not running', routput )
if reResult:
runsv = "Not running"
runDict = {}
runDict['CPU'] = -1
runDict['MEM'] = -1
runDict['VSZ'] = -1
runDict['RSS'] = -1
if pid: # check the process CPU usage and memory
# PID %CPU %MEM VSZ
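        # 'ps -p <pid> u' output is assumed to provide its numeric fields in the order
        # PID, %CPU, %MEM, VSZ, RSS, so values[1:5] below are taken as %CPU, %MEM, VSZ and RSS (added comment).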
result = self.execCommand( 0, ['ps', '-p', pid, 'u'] )
if result['OK'] and len( result['Value'] ) > 0:
stats = result['Value'][1]
values = re.findall( r"\d*\.\d+|\d+", stats )
if len( values ) > 0:
runDict['CPU'] = values[1]
runDict['MEM'] = values[2]
runDict['VSZ'] = values[3]
runDict['RSS'] = values[4]
runDict['Timeup'] = timeup
runDict['PID'] = pid
runDict['RunitStatus'] = "Unknown"
if run:
runDict['RunitStatus'] = "Run"
if down:
runDict['RunitStatus'] = "Down"
if runsv == "Not running":
runDict['RunitStatus'] = "NoRunitControl"
componentDict[cname] = runDict
return S_OK( componentDict )
def getComponentModule( self, system, component, compType ):
"""
Get the component software module
"""
self.setup = CSGlobals.getSetup()
self.instance = gConfig.getValue( cfgPath( 'DIRAC', 'Setups', self.setup, system ), '' )
if not self.instance:
return S_OK( component )
module = gConfig.getValue( cfgPath( 'Systems', system, self.instance, compType, component, 'Module' ), '' )
if not module:
module = component
return S_OK( module )
def getOverallStatus( self, extensions ):
"""
Get the list of all the components ( services and agents )
set up for running with runsvdir in startup directory
"""
result = self.getSoftwareComponents( extensions )
if not result['OK']:
return result
softDict = result['Value']
result = self.getSetupComponents()
if not result['OK']:
return result
setupDict = result['Value']
result = self.getInstalledComponents()
if not result['OK']:
return result
installedDict = result['Value']
result = self.getStartupComponentStatus( [] )
if not result['OK']:
return result
runitDict = result['Value']
# Collect the info now
resultDict = {}
resultIndexes = {}
for cType in self.componentTypes:
result = self._getSectionName( cType )
if not result[ 'OK' ]:
return result
resultIndexes[ cType ] = result[ 'Value' ]
resultDict[ resultIndexes[ cType ] ] = {}
for compType in resultIndexes.values():
if softDict.has_key( 'Services' ):
for system in softDict[compType]:
resultDict[compType][system] = {}
for component in softDict[compType][system]:
if system == 'Configuration' and component == 'Configuration':
              # Fix to avoid missing CS due to the difference between the Service name and the Handler name
component = 'Server'
resultDict[compType][system][component] = {}
resultDict[compType][system][component]['Setup'] = False
resultDict[compType][system][component]['Installed'] = False
resultDict[compType][system][component]['RunitStatus'] = 'Unknown'
resultDict[compType][system][component]['Timeup'] = 0
resultDict[compType][system][component]['PID'] = 0
# TODO: why do we need a try here?
try:
if component in setupDict[compType][system]:
resultDict[compType][system][component]['Setup'] = True
except Exception:
pass
try:
if component in installedDict[compType][system]:
resultDict[compType][system][component]['Installed'] = True
except Exception:
pass
try:
compDir = system + '_' + component
if runitDict.has_key( compDir ):
resultDict[compType][system][component]['RunitStatus'] = runitDict[compDir]['RunitStatus']
resultDict[compType][system][component]['Timeup'] = runitDict[compDir]['Timeup']
resultDict[compType][system][component]['PID'] = runitDict[compDir]['PID']
resultDict[compType][system][component]['CPU'] = runitDict[compDir]['CPU']
resultDict[compType][system][component]['MEM'] = runitDict[compDir]['MEM']
resultDict[compType][system][component]['RSS'] = runitDict[compDir]['RSS']
resultDict[compType][system][component]['VSZ'] = runitDict[compDir]['VSZ']
except Exception:
# print str(x)
pass
      # Installed components may not be the same as those in the software list
if installedDict.has_key( 'Services' ):
for system in installedDict[compType]:
for component in installedDict[compType][system]:
if compType in resultDict:
if system in resultDict[compType]:
if component in resultDict[compType][system]:
continue
resultDict[compType][system][component] = {}
resultDict[compType][system][component]['Setup'] = False
resultDict[compType][system][component]['Installed'] = True
resultDict[compType][system][component]['RunitStatus'] = 'Unknown'
resultDict[compType][system][component]['Timeup'] = 0
resultDict[compType][system][component]['PID'] = 0
# TODO: why do we need a try here?
try:
if component in setupDict[compType][system]:
resultDict[compType][system][component]['Setup'] = True
except Exception:
pass
try:
compDir = system + '_' + component
if runitDict.has_key( compDir ):
resultDict[compType][system][component]['RunitStatus'] = runitDict[compDir]['RunitStatus']
resultDict[compType][system][component]['Timeup'] = runitDict[compDir]['Timeup']
resultDict[compType][system][component]['PID'] = runitDict[compDir]['PID']
resultDict[compType][system][component]['CPU'] = runitDict[compDir]['CPU']
resultDict[compType][system][component]['MEM'] = runitDict[compDir]['MEM']
resultDict[compType][system][component]['RSS'] = runitDict[compDir]['RSS']
resultDict[compType][system][component]['VSZ'] = runitDict[compDir]['VSZ']
except Exception:
# print str(x)
pass
return S_OK( resultDict )
def checkComponentModule( self, componentType, system, module ):
"""
Check existence of the given module
and if it inherits from the proper class
"""
if componentType == 'agent':
loader = ModuleLoader( "Agent", PathFinder.getAgentSection, AgentModule )
elif componentType == 'service':
loader = ModuleLoader( "Service", PathFinder.getServiceSection,
RequestHandler, moduleSuffix = "Handler" )
elif componentType == 'executor':
loader = ModuleLoader( "Executor", PathFinder.getExecutorSection, ExecutorModule )
else:
return S_ERROR( 'Unknown component type %s' % componentType )
return loader.loadModule( "%s/%s" % ( system, module ) )
def checkComponentSoftware( self, componentType, system, component, extensions ):
"""
Check the component software
"""
result = self.getSoftwareComponents( extensions )
if not result['OK']:
return result
softComp = result[ 'Value' ]
result = self._getSectionName( componentType )
if not result[ 'OK' ]:
return result
try:
softDict = softComp[ result[ 'Value' ] ]
    except KeyError:
return S_ERROR( 'Unknown component type %s' % componentType )
if system in softDict and component in softDict[system]:
return S_OK()
return S_ERROR( 'Unknown Component %s/%s' % ( system, component ) )
def runsvctrlComponent( self, system, component, mode ):
"""
Execute runsvctrl and check status of the specified component
"""
if not mode in ['u', 'd', 'o', 'p', 'c', 'h', 'a', 'i', 'q', '1', '2', 't', 'k', 'x', 'e']:
return S_ERROR( 'Unknown runsvctrl mode "%s"' % mode )
startCompDirs = glob.glob( os.path.join( self.startDir, '%s_%s' % ( system, component ) ) )
# Make sure that the Configuration server restarts first and the SystemAdmin restarts last
tmpList = list( startCompDirs )
for comp in tmpList:
if "Framework_SystemAdministrator" in comp:
startCompDirs.append( startCompDirs.pop( startCompDirs.index( comp ) ) )
if "Configuration_Server" in comp:
startCompDirs.insert( 0, startCompDirs.pop( startCompDirs.index( comp ) ) )
startCompList = [ [k] for k in startCompDirs]
for startComp in startCompList:
result = self.execCommand( 0, ['runsvctrl', mode] + startComp )
if not result['OK']:
return result
time.sleep( 2 )
# Check the runsv status
if system == '*' or component == '*':
time.sleep( 10 )
# Final check
result = self.getStartupComponentStatus( [( system, component )] )
if not result['OK']:
gLogger.error( 'Failed to start the component %s %s' %(system, component) )
return S_ERROR( 'Failed to start the component' )
return result
def getLogTail( self, system, component, length = 100 ):
"""
Get the tail of the component log file
"""
retDict = {}
for startCompDir in glob.glob( os.path.join( self.startDir, '%s_%s' % ( system, component ) ) ):
compName = os.path.basename( startCompDir )
logFileName = os.path.join( startCompDir, 'log', 'current' )
if not os.path.exists( logFileName ):
retDict[compName] = 'No log file found'
else:
logFile = open( logFileName, 'r' )
lines = [ line.strip() for line in logFile.readlines() ]
logFile.close()
if len( lines ) < length:
retDict[compName] = '\n'.join( lines )
else:
retDict[compName] = '\n'.join( lines[-length:] )
return S_OK( retDict )
def setupSite( self, scriptCfg, cfg = None ):
"""
Setup a new site using the options defined
"""
# First we need to find out what needs to be installed
# by default use dirac.cfg, but if a cfg is given use it and
# merge it into the dirac.cfg
diracCfg = CFG()
installCfg = None
if cfg:
try:
installCfg = CFG()
installCfg.loadFromFile( cfg )
for section in ['DIRAC', 'LocalSite', cfgInstallSection]:
if installCfg.isSection( section ):
diracCfg.createNewSection( section, contents = installCfg[section] )
if self.instancePath != self.basePath:
if not diracCfg.isSection( 'LocalSite' ):
diracCfg.createNewSection( 'LocalSite' )
diracCfg.setOption( cfgPath( 'LocalSite', 'InstancePath' ), self.instancePath )
self._addCfgToDiracCfg( diracCfg )
except Exception: #pylint: disable=broad-except
error = 'Failed to load %s' % cfg
gLogger.exception( error )
if self.exitOnError:
DIRAC.exit( -1 )
return S_ERROR( error )
# Now get the necessary info from self.localCfg
setupSystems = self.localCfg.getOption( cfgInstallPath( 'Systems' ), ['Configuration', 'Framework'] )
installMySQLFlag = self.localCfg.getOption( cfgInstallPath( 'InstallMySQL' ), False )
setupDatabases = self.localCfg.getOption( cfgInstallPath( 'Databases' ), [] )
setupServices = [ k.split( '/' ) for k in self.localCfg.getOption( cfgInstallPath( 'Services' ), [] ) ]
setupAgents = [ k.split( '/' ) for k in self.localCfg.getOption( cfgInstallPath( 'Agents' ), [] ) ]
setupExecutors = [ k.split( '/' ) for k in self.localCfg.getOption( cfgInstallPath( 'Executors' ), [] ) ]
setupWeb = self.localCfg.getOption( cfgInstallPath( 'WebPortal' ), False )
setupWebApp = self.localCfg.getOption( cfgInstallPath( 'WebApp' ), True )
setupConfigurationMaster = self.localCfg.getOption( cfgInstallPath( 'ConfigurationMaster' ), False )
setupPrivateConfiguration = self.localCfg.getOption( cfgInstallPath( 'PrivateConfiguration' ), False )
setupConfigurationName = self.localCfg.getOption( cfgInstallPath( 'ConfigurationName' ), self.setup )
setupAddConfiguration = self.localCfg.getOption( cfgInstallPath( 'AddConfiguration' ), True )
for serviceTuple in setupServices:
error = ''
if len( serviceTuple ) != 2:
error = 'Wrong service specification: system/service'
# elif serviceTuple[0] not in setupSystems:
# error = 'System %s not available' % serviceTuple[0]
if error:
if self.exitOnError:
gLogger.error( error )
DIRAC.exit( -1 )
return S_ERROR( error )
serviceSysInstance = serviceTuple[0]
if not serviceSysInstance in setupSystems:
setupSystems.append( serviceSysInstance )
for agentTuple in setupAgents:
error = ''
if len( agentTuple ) != 2:
error = 'Wrong agent specification: system/agent'
# elif agentTuple[0] not in setupSystems:
# error = 'System %s not available' % agentTuple[0]
if error:
if self.exitOnError:
gLogger.error( error )
DIRAC.exit( -1 )
return S_ERROR( error )
agentSysInstance = agentTuple[0]
if not agentSysInstance in setupSystems:
setupSystems.append( agentSysInstance )
for executorTuple in setupExecutors:
error = ''
if len( executorTuple ) != 2:
error = 'Wrong executor specification: system/executor'
if error:
if self.exitOnError:
gLogger.error( error )
DIRAC.exit( -1 )
return S_ERROR( error )
executorSysInstance = executorTuple[0]
if not executorSysInstance in setupSystems:
setupSystems.append( executorSysInstance )
# And to find out the available extensions
result = self.getExtensions()
if not result['OK']:
return result
extensions = [ k.replace( 'DIRAC', '' ) for k in result['Value']]
# Make sure the necessary directories are there
if self.basePath != self.instancePath:
mkDir(self.instancePath)
instanceEtcDir = os.path.join( self.instancePath, 'etc' )
etcDir = os.path.dirname( self.cfgFile )
if not os.path.exists( instanceEtcDir ):
mkLink( etcDir, instanceEtcDir )
if os.path.realpath( instanceEtcDir ) != os.path.realpath( etcDir ):
error = 'Instance etc (%s) is not the same as DIRAC etc (%s)' % ( instanceEtcDir, etcDir )
if self.exitOnError:
gLogger.error( error )
DIRAC.exit( -1 )
return S_ERROR( error )
    # if any server or agent needs to be installed we need the startup directory and runsvdir running
if setupServices or setupAgents or setupExecutors or setupWeb:
if not os.path.exists( self.startDir ):
mkDir(self.startDir)
# And need to make sure runsvdir is running
result = self.execCommand( 0, ['ps', '-ef'] )
if not result['OK']:
if self.exitOnError:
gLogger.error( 'Failed to verify runsvdir running', result['Message'] )
DIRAC.exit( -1 )
return S_ERROR( result['Message'] )
processList = result['Value'][1].split( '\n' )
cmd = 'runsvdir %s' % self.startDir
cmdFound = False
for process in processList:
if process.find( cmd ) != -1:
cmdFound = True
if not cmdFound:
gLogger.notice( 'Starting runsvdir ...' )
os.system( "runsvdir %s 'log: DIRAC runsv' &" % self.startDir )
if ['Configuration', 'Server'] in setupServices and setupConfigurationMaster:
# This server hosts the Master of the CS
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
gLogger.notice( 'Installing Master Configuration Server' )
cfg = self.__getCfg( cfgPath( 'DIRAC', 'Setups', self.setup ), 'Configuration', self.instance )
self._addCfgToDiracCfg( cfg )
cfg = self.__getCfg( cfgPath( 'DIRAC', 'Configuration' ), 'Master' , 'yes' )
cfg.setOption( cfgPath( 'DIRAC', 'Configuration', 'Name' ) , setupConfigurationName )
serversCfgPath = cfgPath( 'DIRAC', 'Configuration', 'Servers' )
if not self.localCfg.getOption( serversCfgPath , [] ):
serverUrl = 'dips://%s:9135/Configuration/Server' % self.host
cfg.setOption( serversCfgPath, serverUrl )
gConfigurationData.setOptionInCFG( serversCfgPath, serverUrl )
instanceOptionPath = cfgPath( 'DIRAC', 'Setups', self.setup )
instanceCfg = self.__getCfg( instanceOptionPath, 'Configuration', self.instance )
cfg = cfg.mergeWith( instanceCfg )
self._addCfgToDiracCfg( cfg )
result = self.getComponentCfg( 'service', 'Configuration', 'Server', self.instance, extensions, addDefaultOptions = True )
if not result['OK']:
if self.exitOnError:
DIRAC.exit( -1 )
else:
return result
compCfg = result['Value']
cfg = cfg.mergeWith( compCfg )
gConfigurationData.mergeWithLocal( cfg )
self.addDefaultOptionsToComponentCfg( 'service', 'Configuration', 'Server', [] )
if installCfg:
centralCfg = self._getCentralCfg( installCfg )
else:
centralCfg = self._getCentralCfg( self.localCfg )
self._addCfgToLocalCS( centralCfg )
self.setupComponent( 'service', 'Configuration', 'Server', [], checkModule = False )
self.runsvctrlComponent( 'Configuration', 'Server', 't' )
while ['Configuration', 'Server'] in setupServices:
setupServices.remove( ['Configuration', 'Server'] )
time.sleep( 5 )
# Now need to check if there is valid CS to register the info
result = scriptCfg.enableCS()
if not result['OK']:
if self.exitOnError:
DIRAC.exit( -1 )
return result
cfgClient = CSAPI()
if not cfgClient.initialize():
error = 'Configuration Server not defined'
if self.exitOnError:
gLogger.error( error )
DIRAC.exit( -1 )
return S_ERROR( error )
    # We need to make sure components are connecting to the Master CS, which is the only one being updated
localServers = self.localCfg.getOption( cfgPath( 'DIRAC', 'Configuration', 'Servers' ) )
masterServer = gConfig.getValue( cfgPath( 'DIRAC', 'Configuration', 'MasterServer' ), '' )
initialCfg = self.__getCfg( cfgPath( 'DIRAC', 'Configuration' ), 'Servers' , localServers )
masterCfg = self.__getCfg( cfgPath( 'DIRAC', 'Configuration' ), 'Servers' , masterServer )
self._addCfgToDiracCfg( masterCfg )
# 1.- Setup the instances in the CS
# If the Configuration Server used is not the Master, it can take some time for this
    # info to be propagated; this may cause the later setup steps to fail
if setupAddConfiguration:
gLogger.notice( 'Registering System instances' )
for system in setupSystems:
self.addSystemInstance( system, self.instance, self.setup, True )
for system, service in setupServices:
if not self.addDefaultOptionsToCS( None, 'service', system, service, extensions, overwrite = True )['OK']:
# If we are not allowed to write to the central CS, add the configuration to the local file
self.addDefaultOptionsToComponentCfg( 'service', system, service, extensions )
for system, agent in setupAgents:
if not self.addDefaultOptionsToCS( None, 'agent', system, agent, extensions, overwrite = True )['OK']:
# If we are not allowed to write to the central CS, add the configuration to the local file
self.addDefaultOptionsToComponentCfg( 'agent', system, agent, extensions )
for system, executor in setupExecutors:
if not self.addDefaultOptionsToCS( None, 'executor', system, executor, extensions, overwrite = True )['OK']:
# If we are not allowed to write to the central CS, add the configuration to the local file
self.addDefaultOptionsToComponentCfg( 'executor', system, executor, extensions )
else:
gLogger.warn( 'Configuration parameters definition is not requested' )
if ['Configuration', 'Server'] in setupServices and setupPrivateConfiguration:
cfg = self.__getCfg( cfgPath( 'DIRAC', 'Configuration' ), 'AutoPublish' , 'no' )
self._addCfgToDiracCfg( cfg )
# 2.- Check if MySQL is to be installed
if installMySQLFlag:
gLogger.notice( 'Installing MySQL' )
self.getMySQLPasswords()
self.installMySQL()
# 3.- Install requested Databases
# if MySQL is not installed locally, we assume a host is given
if setupDatabases:
result = self.getDatabases()
if not result['OK']:
if self.exitOnError:
gLogger.error( 'Failed to get databases', result['Message'] )
DIRAC.exit( -1 )
return result
installedDatabases = result['Value']
result = self.getAvailableDatabases( CSGlobals.getCSExtensions() )
if not result[ 'OK' ]:
return result
dbDict = result['Value']
for dbName in setupDatabases:
if dbName not in installedDatabases:
result = self.installDatabase( dbName )
if not result['OK']:
gLogger.error( result['Message'] )
DIRAC.exit( -1 )
extension, system = result['Value']
gLogger.notice( 'Database %s from %s/%s installed' % ( dbName, extension, system ) )
else:
gLogger.notice( 'Database %s already installed' % dbName )
dbSystem = dbDict[dbName]['System']
result = self.addDatabaseOptionsToCS( None, dbSystem, dbName, overwrite = True )
if not result['OK']:
gLogger.error( 'Database %s CS registration failed: %s' % ( dbName, result['Message'] ) )
if self.mysqlPassword:
if not self._addMySQLToDiracCfg():
error = 'Failed to add MySQL user/password to local configuration'
if self.exitOnError:
gLogger.error( error )
DIRAC.exit( -1 )
return S_ERROR( error )
if self.noSQLPassword:
if not self._addNoSQLToDiracCfg():
error = 'Failed to add NoSQL user/password to local configuration'
if self.exitOnError:
gLogger.error( error )
DIRAC.exit( -1 )
return S_ERROR( error )
    # 4.- Then install the requested services
for system, service in setupServices:
result = self.setupComponent( 'service', system, service, extensions )
if not result['OK']:
gLogger.error( result['Message'] )
continue
# 5.- Now the agents
for system, agent in setupAgents:
result = self.setupComponent( 'agent', system, agent, extensions )
if not result['OK']:
gLogger.error( result['Message'] )
continue
# 6.- Now the executors
for system, executor in setupExecutors:
result = self.setupComponent( 'executor', system, executor, extensions )
if not result['OK']:
gLogger.error( result['Message'] )
continue
# 7.- And finally the Portal
if setupWeb:
if setupWebApp:
self.setupNewPortal()
else:
self.setupPortal()
if localServers != masterServer:
self._addCfgToDiracCfg( initialCfg )
for system, service in setupServices:
self.runsvctrlComponent( system, service, 't' )
for system, agent in setupAgents:
self.runsvctrlComponent( system, agent, 't' )
for system, executor in setupExecutors:
self.runsvctrlComponent( system, executor, 't' )
return S_OK()
def _getSectionName( self, compType ):
"""
Returns the section name for a component in the CS
    For instance, the section for a service is Services,
    whereas the section for an agent is Agents
"""
return S_OK( '%ss' % ( compType.title() ) )
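  # Illustrative examples (added comment): _getSectionName( 'service' ) gives S_OK( 'Services' ),
  # _getSectionName( 'agent' ) gives S_OK( 'Agents' ) and _getSectionName( 'executor' ) gives S_OK( 'Executors' ).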
def _createRunitLog( self, runitCompDir ):
self.controlDir = os.path.join( runitCompDir, 'control' )
mkDir( self.controlDir )
logDir = os.path.join( runitCompDir, 'log' )
mkDir( logDir )
logConfigFile = os.path.join( logDir, 'config' )
with open( logConfigFile, 'w' ) as fd:
fd.write(
"""s10000000
n20
""" )
logRunFile = os.path.join( logDir, 'run' )
with open( logRunFile, 'w' ) as fd:
fd.write(
"""#!/bin/bash
#
rcfile=%(bashrc)s
[ -e $rcfile ] && source $rcfile
#
exec svlogd .
""" % { 'bashrc' : os.path.join( self.instancePath, 'bashrc' ) } )
os.chmod( logRunFile, self.gDefaultPerms )
def installComponent( self, componentType, system, component, extensions, componentModule = '', checkModule = True ):
"""
Install runit directory for the specified component
"""
# Check if the component is already installed
runitCompDir = os.path.join( self.runitDir, system, component )
if os.path.exists( runitCompDir ):
msg = "%s %s_%s already installed" % ( componentType, system, component )
gLogger.notice( msg )
return S_OK( runitCompDir )
# Check that the software for the component is installed
# Any "Load" or "Module" option in the configuration defining what modules the given "component"
# needs to load will be taken care of by self.checkComponentModule.
if checkModule:
cModule = componentModule
if not cModule:
cModule = component
result = self.checkComponentModule( componentType, system, cModule )
if not result['OK']:
if not self.checkComponentSoftware( componentType, system, cModule, extensions )['OK'] and componentType != 'executor':
error = 'Software for %s %s/%s is not installed' % ( componentType, system, component )
if self.exitOnError:
gLogger.error( error )
DIRAC.exit( -1 )
return S_ERROR( error )
gLogger.notice( 'Installing %s %s/%s' % ( componentType, system, component ) )
# Retrieve bash variables to be set
result = gConfig.getOption( 'DIRAC/Setups/%s/%s' % ( CSGlobals.getSetup(), system ) )
if not result[ 'OK' ]:
return result
self.instance = result[ 'Value' ]
specialOptions = {}
if componentModule:
specialOptions['Module'] = componentModule
result = self.getComponentCfg( componentType, system, component, self.instance, extensions,
specialOptions = specialOptions )
if not result[ 'OK' ]:
return result
compCfg = result[ 'Value' ]
result = self._getSectionName( componentType )
if not result[ 'OK' ]:
return result
section = result[ 'Value' ]
bashVars = ''
if compCfg.isSection( 'Systems/%s/%s/%s/%s/Environment' % ( system, self.instance, section, component ) ):
dictionary = compCfg.getAsDict()
bashSection = dictionary[ 'Systems' ][ system ][ self.instance ][ section ][ component ][ 'BashVariables' ]
for var in bashSection:
bashVars = '%s\nexport %s=%s' % ( bashVars, var, bashSection[ var ] )
# Now do the actual installation
try:
componentCfg = os.path.join( self.linkedRootPath, 'etc', '%s_%s.cfg' % ( system, component ) )
if not os.path.exists( componentCfg ):
fd = open( componentCfg, 'w' )
fd.close()
self._createRunitLog( runitCompDir )
runFile = os.path.join( runitCompDir, 'run' )
fd = open( runFile, 'w' )
fd.write(
"""#!/bin/bash
rcfile=%(bashrc)s
[ -e $rcfile ] && source $rcfile
#
exec 2>&1
#
[ "%(componentType)s" = "agent" ] && renice 20 -p $$
#%(bashVariables)s
#
exec python $DIRAC/DIRAC/Core/scripts/dirac-%(componentType)s.py %(system)s/%(component)s %(componentCfg)s < /dev/null
""" % {'bashrc': os.path.join( self.instancePath, 'bashrc' ),
'bashVariables': bashVars,
'componentType': componentType,
'system' : system,
'component': component,
'componentCfg': componentCfg } )
fd.close()
os.chmod( runFile, self.gDefaultPerms )
cTypeLower = componentType.lower()
if cTypeLower == 'agent' or cTypeLower == 'consumer':
        stopFile = os.path.join( runitCompDir, 'control', 't' ) # This is, e.g., /opt/dirac/runit/WorkloadManagementSystem/Matcher/control/t
        controlDir = self.runitDir.replace('runit', 'control') # This is, e.g., /opt/dirac/control/WorkloadManagementSystem/Matcher/
with open( stopFile, 'w' ) as fd:
fd.write( """#!/bin/bash
echo %(controlDir)s/%(system)s/%(component)s/stop_%(type)s
touch %(controlDir)s/%(system)s/%(component)s/stop_%(type)s
""" % { 'controlDir': controlDir,
'system' : system,
'component': component,
'type': cTypeLower } )
os.chmod( stopFile, self.gDefaultPerms )
except Exception:
      error = 'Failed to prepare setup for %s %s/%s' % ( componentType, system, component )
gLogger.exception( error )
if self.exitOnError:
DIRAC.exit( -1 )
return S_ERROR( error )
result = self.execCommand( 5, [runFile] )
gLogger.notice( result['Value'][1] )
return S_OK( runitCompDir )
def setupComponent( self, componentType, system, component, extensions,
componentModule = '', checkModule = True ):
"""
Install and create link in startup
"""
result = self.installComponent( componentType, system, component, extensions, componentModule, checkModule )
if not result['OK']:
return result
# Create the startup entry now
runitCompDir = result['Value']
startCompDir = os.path.join( self.startDir, '%s_%s' % ( system, component ) )
mkDir(self.startDir)
if not os.path.lexists( startCompDir ):
gLogger.notice( 'Creating startup link at', startCompDir )
mkLink( runitCompDir, startCompDir )
time.sleep( 10 )
# Check the runsv status
start = time.time()
while ( time.time() - 20 ) < start:
result = self.getStartupComponentStatus( [ ( system, component )] )
if not result['OK']:
continue
if result['Value'] and result['Value']['%s_%s' % ( system, component )]['RunitStatus'] == "Run":
break
time.sleep( 1 )
# Final check
result = self.getStartupComponentStatus( [( system, component )] )
if not result['OK']:
return S_ERROR( 'Failed to start the component %s_%s' % ( system, component ) )
resDict = {}
resDict['ComponentType'] = componentType
resDict['RunitStatus'] = result['Value']['%s_%s' % ( system, component )]['RunitStatus']
return S_OK( resDict )
def unsetupComponent( self, system, component ):
"""
Remove link from startup
"""
for startCompDir in glob.glob( os.path.join( self.startDir, '%s_%s' % ( system, component ) ) ):
try:
os.unlink( startCompDir )
except Exception:
gLogger.exception()
return S_OK()
def uninstallComponent( self, system, component, removeLogs ):
"""
Remove startup and runit directories
"""
result = self.runsvctrlComponent( system, component, 'd' )
if not result['OK']:
pass
result = self.unsetupComponent( system, component )
if removeLogs:
for runitCompDir in glob.glob( os.path.join( self.runitDir, system, component ) ):
try:
shutil.rmtree( runitCompDir )
except Exception:
gLogger.exception()
result = self.removeComponentOptionsFromCS( system, component )
if not result [ 'OK' ]:
return result
return S_OK()
def installPortal( self ):
"""
Install runit directories for the Web Portal
"""
# Check that the software for the Web Portal is installed
error = ''
webDir = os.path.join( self.linkedRootPath, 'Web' )
if not os.path.exists( webDir ):
error = 'Web extension not installed at %s' % webDir
if self.exitOnError:
gLogger.error( error )
DIRAC.exit( -1 )
return S_ERROR( error )
# First the lighthttpd server
# Check if the component is already installed
runitHttpdDir = os.path.join( self.runitDir, 'Web', 'httpd' )
runitPasterDir = os.path.join( self.runitDir, 'Web', 'paster' )
if os.path.exists( runitHttpdDir ):
msg = "lighthttpd already installed"
gLogger.notice( msg )
else:
gLogger.notice( 'Installing Lighttpd' )
# Now do the actual installation
try:
self._createRunitLog( runitHttpdDir )
runFile = os.path.join( runitHttpdDir, 'run' )
fd = open( runFile, 'w' )
fd.write(
"""#!/bin/bash
rcfile=%(bashrc)s
[ -e $rcfile ] && source $rcfile
#
exec 2>&1
#
exec lighttpdSvc.sh < /dev/null
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_vm
short_description: Module to manage Virtual Machines in oVirt/RHV
version_added: "2.2"
author:
- Ondra Machacek (@machacekondra)
description:
    - This module manages the whole lifecycle of the Virtual Machine (VM) in oVirt/RHV.
    - Since a VM can hold many states in oVirt/RHV, see the notes below for how the states of the VM are handled.
options:
name:
description:
- Name of the Virtual Machine to manage.
            - If the VM doesn't exist, C(name) is required. Otherwise C(id) or C(name) can be used.
id:
description:
- ID of the Virtual Machine to manage.
state:
description:
- Should the Virtual Machine be running/stopped/present/absent/suspended/next_run/registered/exported.
              When C(state) is I(registered) and the unregistered VM's name
              belongs to a VM that is already registered in the engine in the same data center,
              registration of the unregistered VM fails.
            - I(present) state will create/update the VM and won't change its state if it already exists.
            - I(running) state will create/update the VM and start it.
            - I(next_run) state updates the VM and, if the VM has a next run configuration, it will be rebooted.
            - Please check I(notes) for a more detailed description of the states.
- I(exported) state will export the VM to export domain or as OVA.
- I(registered) is supported since 2.4.
choices: [ absent, next_run, present, registered, running, stopped, suspended, exported ]
default: present
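    # Illustrative minimal task using the options above (the module name is real, all values are
    # assumptions, authentication parameters omitted):
    #   - ovirt_vm:
    #       state: running
    #       name: myvm
    #       cluster: mycluster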
cluster:
description:
- Name of the cluster, where Virtual Machine should be created.
- Required if creating VM.
allow_partial_import:
description:
- Boolean indication whether to allow partial registration of Virtual Machine when C(state) is registered.
type: bool
version_added: "2.4"
vnic_profile_mappings:
description:
- "Mapper which maps an external virtual NIC profile to one that exists in the engine when C(state) is registered.
vnic_profile is described by the following dictionary:"
- "C(source_network_name): The network name of the source network."
- "C(source_profile_name): The profile name related to the source network."
- "C(target_profile_id): The id of the target profile id to be mapped to in the engine."
version_added: "2.5"
cluster_mappings:
description:
- "Mapper which maps cluster name between VM's OVF and the destination cluster this VM should be registered to,
relevant when C(state) is registered.
Cluster mapping is described by the following dictionary:"
- "C(source_name): The name of the source cluster."
- "C(dest_name): The name of the destination cluster."
version_added: "2.5"
role_mappings:
description:
- "Mapper which maps role name between VM's OVF and the destination role this VM should be registered to,
relevant when C(state) is registered.
Role mapping is described by the following dictionary:"
- "C(source_name): The name of the source role."
- "C(dest_name): The name of the destination role."
version_added: "2.5"
domain_mappings:
description:
- "Mapper which maps aaa domain name between VM's OVF and the destination aaa domain this VM should be registered to,
relevant when C(state) is registered.
The aaa domain mapping is described by the following dictionary:"
- "C(source_name): The name of the source aaa domain."
- "C(dest_name): The name of the destination aaa domain."
version_added: "2.5"
affinity_group_mappings:
description:
- "Mapper which maps affinty name between VM's OVF and the destination affinity this VM should be registered to,
relevant when C(state) is registered."
version_added: "2.5"
affinity_label_mappings:
description:
- "Mappper which maps affinity label name between VM's OVF and the destination label this VM should be registered to,
relevant when C(state) is registered."
version_added: "2.5"
lun_mappings:
description:
- "Mapper which maps lun between VM's OVF and the destination lun this VM should contain, relevant when C(state) is registered.
lun_mappings is described by the following dictionary:
- C(logical_unit_id): The logical unit number to identify a logical unit,
- C(logical_unit_port): The port being used to connect with the LUN disk.
- C(logical_unit_portal): The portal being used to connect with the LUN disk.
- C(logical_unit_address): The address of the block storage host.
- C(logical_unit_target): The iSCSI specification located on an iSCSI server
- C(logical_unit_username): Username to be used to connect to the block storage host.
- C(logical_unit_password): Password to be used to connect to the block storage host.
               - C(storage_type): The storage type which the LUN resides on (iscsi or fcp)"
version_added: "2.5"
reassign_bad_macs:
description:
- "Boolean indication whether to reassign bad macs when C(state) is registered."
type: bool
version_added: "2.5"
template:
description:
- Name of the template, which should be used to create Virtual Machine.
- Required if creating VM.
- If template is not specified and VM doesn't exist, VM will be created from I(Blank) template.
template_version:
description:
- Version number of the template to be used for VM.
- By default the latest available version of the template is used.
version_added: "2.3"
use_latest_template_version:
description:
- Specify if latest template version should be used, when running a stateless VM.
- If this parameter is set to I(yes) stateless VM is created.
type: bool
version_added: "2.3"
storage_domain:
description:
- Name of the storage domain where all template disks should be created.
- This parameter is considered only when C(template) is provided.
            - IMPORTANT - This parameter is not idempotent; if the VM exists and you specify a different storage domain,
              the disk won't move.
version_added: "2.4"
disk_format:
description:
- Specify format of the disk.
            - If C(cow) format is used, disk will be created as sparse, so space will be allocated for the volume as needed, also known as I(thin provision).
- If C(raw) format is used, disk storage will be allocated right away, also known as I(preallocated).
- Note that this option isn't idempotent as it's not currently possible to change format of the disk via API.
- This parameter is considered only when C(template) and C(storage domain) is provided.
choices: [ cow, raw ]
default: cow
version_added: "2.4"
memory:
description:
- Amount of memory of the Virtual Machine. Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
- Default value is set by engine.
memory_guaranteed:
description:
- Amount of minimal guaranteed memory of the Virtual Machine.
Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
- C(memory_guaranteed) parameter can't be lower than C(memory) parameter.
- Default value is set by engine.
memory_max:
description:
- Upper bound of virtual machine memory up to which memory hot-plug can be performed.
Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
- Default value is set by engine.
version_added: "2.5"
cpu_shares:
description:
- Set a CPU shares for this Virtual Machine.
- Default value is set by oVirt/RHV engine.
cpu_cores:
description:
            - Number of virtual CPU cores of the Virtual Machine.
- Default value is set by oVirt/RHV engine.
cpu_sockets:
description:
            - Number of virtual CPU sockets of the Virtual Machine.
- Default value is set by oVirt/RHV engine.
cpu_threads:
description:
            - Number of virtual CPU threads of the Virtual Machine.
- Default value is set by oVirt/RHV engine.
version_added: "2.5"
type:
description:
- Type of the Virtual Machine.
- Default value is set by oVirt/RHV engine.
- I(high_performance) is supported since Ansible 2.5 and oVirt/RHV 4.2.
choices: [ desktop, server, high_performance ]
quota_id:
description:
- "Virtual Machine quota ID to be used for disk. By default quota is chosen by oVirt/RHV engine."
version_added: "2.5"
operating_system:
description:
- Operating system of the Virtual Machine.
- Default value is set by oVirt/RHV engine.
- "Possible values: debian_7, freebsd, freebsdx64, other, other_linux,
other_linux_ppc64, other_ppc64, rhel_3, rhel_4, rhel_4x64, rhel_5, rhel_5x64,
rhel_6, rhel_6x64, rhel_6_ppc64, rhel_7x64, rhel_7_ppc64, sles_11, sles_11_ppc64,
ubuntu_12_04, ubuntu_12_10, ubuntu_13_04, ubuntu_13_10, ubuntu_14_04, ubuntu_14_04_ppc64,
windows_10, windows_10x64, windows_2003, windows_2003x64, windows_2008, windows_2008x64,
windows_2008r2x64, windows_2008R2x64, windows_2012x64, windows_2012R2x64, windows_7,
windows_7x64, windows_8, windows_8x64, windows_xp"
boot_devices:
description:
- List of boot devices which should be used to boot. For example C([ cdrom, hd ]).
- Default value is set by oVirt/RHV engine.
choices: [ cdrom, hd, network ]
boot_menu:
description:
- "I(True) enable menu to select boot device, I(False) to disable it. By default is chosen by oVirt/RHV engine."
type: bool
version_added: "2.5"
usb_support:
description:
- "I(True) enable USB support, I(False) to disable it. By default is chosen by oVirt/RHV engine."
type: bool
version_added: "2.5"
serial_console:
description:
- "I(True) enable VirtIO serial console, I(False) to disable it. By default is chosen by oVirt/RHV engine."
type: bool
version_added: "2.5"
sso:
description:
- "I(True) enable Single Sign On by Guest Agent, I(False) to disable it. By default is chosen by oVirt/RHV engine."
type: bool
version_added: "2.5"
host:
description:
- Specify host where Virtual Machine should be running. By default the host is chosen by engine scheduler.
- This parameter is used only when C(state) is I(running) or I(present).
high_availability:
description:
- If I(yes) Virtual Machine will be set as highly available.
- If I(no) Virtual Machine won't be set as highly available.
- If no value is passed, default value is set by oVirt/RHV engine.
type: bool
high_availability_priority:
description:
- Indicates the priority of the virtual machine inside the run and migration queues.
Virtual machines with higher priorities will be started and migrated before virtual machines with lower
priorities. The value is an integer between 0 and 100. The higher the value, the higher the priority.
- If no value is passed, default value is set by oVirt/RHV engine.
version_added: "2.5"
lease:
description:
            - Name of the storage domain this virtual machine lease resides on.
- NOTE - Supported since oVirt 4.1.
version_added: "2.4"
custom_compatibility_version:
description:
- "Enables a virtual machine to be customized to its own compatibility version. If
'C(custom_compatibility_version)' is set, it overrides the cluster's compatibility version
for this particular virtual machine."
version_added: "2.7"
host_devices:
description:
            - Single Root I/O Virtualization - technology that allows a single device to expose multiple endpoints that can be passed to VMs.
            - C(host_devices) is a list of dictionaries which contain the name and state of a device.
version_added: "2.7"
delete_protected:
description:
- If I(yes) Virtual Machine will be set as delete protected.
- If I(no) Virtual Machine won't be set as delete protected.
- If no value is passed, default value is set by oVirt/RHV engine.
type: bool
stateless:
description:
- If I(yes) Virtual Machine will be set as stateless.
- If I(no) Virtual Machine will be unset as stateless.
- If no value is passed, default value is set by oVirt/RHV engine.
type: bool
clone:
description:
- If I(yes) then the disks of the created virtual machine will be cloned and independent of the template.
- This parameter is used only when C(state) is I(running) or I(present) and VM didn't exist before.
type: bool
default: 'no'
clone_permissions:
description:
- If I(yes) then the permissions of the template (only the direct ones, not the inherited ones)
will be copied to the created virtual machine.
- This parameter is used only when C(state) is I(running) or I(present) and VM didn't exist before.
type: bool
default: 'no'
cd_iso:
description:
- ISO file from ISO storage domain which should be attached to Virtual Machine.
- If you pass empty string the CD will be ejected from VM.
- If used with C(state) I(running) or I(present) and VM is running the CD will be attached to VM.
- If used with C(state) I(running) or I(present) and VM is down the CD will be attached to VM persistently.
force:
description:
            - Please check the I(Synopsis) for a more detailed description of the force parameter; it can behave differently
              in different situations.
type: bool
default: 'no'
nics:
description:
- List of NICs, which should be attached to Virtual Machine. NIC is described by following dictionary.
- C(name) - Name of the NIC.
- C(profile_name) - Profile name where NIC should be attached.
- C(interface) - Type of the network interface. One of following I(virtio), I(e1000), I(rtl8139), default is I(virtio).
- C(mac_address) - Custom MAC address of the network interface, by default it's obtained from MAC pool.
- NOTE - This parameter is used only when C(state) is I(running) or I(present) and is able to only create NICs.
To manage NICs of the VM in more depth please use M(ovirt_nics) module instead.
disks:
description:
- List of disks, which should be attached to Virtual Machine. Disk is described by following dictionary.
- C(name) - Name of the disk. Either C(name) or C(id) is required.
- C(id) - ID of the disk. Either C(name) or C(id) is required.
- C(interface) - Interface of the disk, either I(virtio) or I(IDE), default is I(virtio).
- C(bootable) - I(True) if the disk should be bootable, default is non bootable.
- C(activate) - I(True) if the disk should be activated, default is activated.
- NOTE - This parameter is used only when C(state) is I(running) or I(present) and is able to only attach disks.
To manage disks of the VM in more depth please use M(ovirt_disks) module instead.
sysprep:
description:
- Dictionary with values for Windows Virtual Machine initialization using sysprep.
- C(host_name) - Hostname to be set to Virtual Machine when deployed.
- C(active_directory_ou) - Active Directory Organizational Unit, to be used for login of user.
- C(org_name) - Organization name to be set to Windows Virtual Machine.
- C(domain) - Domain to be set to Windows Virtual Machine.
- C(timezone) - Timezone to be set to Windows Virtual Machine.
- C(ui_language) - UI language of the Windows Virtual Machine.
- C(system_locale) - System localization of the Windows Virtual Machine.
- C(input_locale) - Input localization of the Windows Virtual Machine.
- C(windows_license_key) - License key to be set to Windows Virtual Machine.
            - C(user_name) - Username to be used to set the password for the Windows Virtual Machine.
            - C(root_password) - Password to be set for the user specified by C(user_name) on the Windows Virtual Machine.
cloud_init:
description:
- Dictionary with values for Unix-like Virtual Machine initialization using cloud init.
- C(host_name) - Hostname to be set to Virtual Machine when deployed.
- C(timezone) - Timezone to be set to Virtual Machine when deployed.
- C(user_name) - Username to be used to set password to Virtual Machine when deployed.
- C(root_password) - Password to be set for user specified by C(user_name) parameter.
            - C(authorized_ssh_keys) - Use these SSH keys to log in to the Virtual Machine.
- C(regenerate_ssh_keys) - If I(True) SSH keys will be regenerated on Virtual Machine.
- C(custom_script) - Cloud-init script which will be executed on Virtual Machine when deployed. This is appended to the end of the
cloud-init script generated by any other options.
- C(dns_servers) - DNS servers to be configured on Virtual Machine.
- C(dns_search) - DNS search domains to be configured on Virtual Machine.
- C(nic_boot_protocol) - Set boot protocol of the network interface of Virtual Machine. Can be one of C(none), C(dhcp) or C(static).
- C(nic_ip_address) - If boot protocol is static, set this IP address to network interface of Virtual Machine.
- C(nic_netmask) - If boot protocol is static, set this netmask to network interface of Virtual Machine.
- C(nic_gateway) - If boot protocol is static, set this gateway to network interface of Virtual Machine.
- C(nic_name) - Set name to network interface of Virtual Machine.
- C(nic_on_boot) - If I(True) network interface will be set to start on boot.
cloud_init_nics:
description:
- List of dictionaries representing network interfaces to be setup by cloud init.
            - This option is used when the user needs to set up more network interfaces via cloud-init.
- If one network interface is enough, user should use C(cloud_init) I(nic_*) parameters. C(cloud_init) I(nic_*) parameters
are merged with C(cloud_init_nics) parameters.
- Dictionary can contain following values.
- C(nic_boot_protocol) - Set boot protocol of the network interface of Virtual Machine. Can be one of C(none), C(dhcp) or C(static).
- C(nic_ip_address) - If boot protocol is static, set this IP address to network interface of Virtual Machine.
- C(nic_netmask) - If boot protocol is static, set this netmask to network interface of Virtual Machine.
- C(nic_gateway) - If boot protocol is static, set this gateway to network interface of Virtual Machine.
- C(nic_name) - Set name to network interface of Virtual Machine.
- C(nic_on_boot) - If I(True) network interface will be set to start on boot.
version_added: "2.3"
cloud_init_persist:
description:
- "If I(true) the C(cloud_init) or C(sysprep) parameters will be saved for the virtual machine
and won't be virtual machine won't be started as run-once."
type: bool
version_added: "2.5"
aliases: [ 'sysprep_persist' ]
kernel_params_persist:
description:
- "If I(true) C(kernel_params), C(initrd_path) and C(kernel_path) will persist in virtual machine configuration,
if I(False) it will be used for run once."
type: bool
version_added: "2.8"
kernel_path:
description:
- Path to a kernel image used to boot the virtual machine.
- Kernel image must be stored on either the ISO domain or on the host's storage.
version_added: "2.3"
initrd_path:
description:
- Path to an initial ramdisk to be used with the kernel specified by C(kernel_path) option.
- Ramdisk image must be stored on either the ISO domain or on the host's storage.
version_added: "2.3"
kernel_params:
description:
- Kernel command line parameters (formatted as string) to be used with the kernel specified by C(kernel_path) option.
version_added: "2.3"
instance_type:
description:
- Name of virtual machine's hardware configuration.
- By default no instance type is used.
version_added: "2.3"
description:
description:
- Description of the Virtual Machine.
version_added: "2.3"
comment:
description:
- Comment of the Virtual Machine.
version_added: "2.3"
timezone:
description:
- Sets time zone offset of the guest hardware clock.
- For example C(Etc/GMT)
version_added: "2.3"
serial_policy:
description:
- Specify a serial number policy for the Virtual Machine.
- Following options are supported.
- C(vm) - Sets the Virtual Machine's UUID as its serial number.
- C(host) - Sets the host's UUID as the Virtual Machine's serial number.
- C(custom) - Allows you to specify a custom serial number in C(serial_policy_value).
choices: ['vm', 'host', 'custom']
version_added: "2.3"
serial_policy_value:
description:
- Allows you to specify a custom serial number.
- This parameter is used only when C(serial_policy) is I(custom).
version_added: "2.3"
vmware:
description:
- Dictionary of values to be used to connect to VMware and import
a virtual machine to oVirt.
- Dictionary can contain following values.
- C(username) - The username to authenticate against the VMware.
- C(password) - The password to authenticate against the VMware.
- C(url) - The URL to be passed to the I(virt-v2v) tool for conversion.
              For example I(vpx://vmware_user@vcenter-host/DataCenter/Cluster/esxi-host?no_verify=1)
- C(drivers_iso) - The name of the ISO containing drivers that can
be used during the I(virt-v2v) conversion process.
- C(sparse) - Specifies the disk allocation policy of the resulting
virtual machine. I(true) for sparse, I(false) for preallocated.
Default value is I(true).
- C(storage_domain) - Specifies the target storage domain for
converted disks. This is required parameter.
version_added: "2.3"
xen:
description:
- Dictionary of values to be used to connect to XEN and import
a virtual machine to oVirt.
- Dictionary can contain following values.
- C(url) - The URL to be passed to the I(virt-v2v) tool for conversion.
For example I(xen+ssh://root@zen.server). This is required parameter.
- C(drivers_iso) - The name of the ISO containing drivers that can
be used during the I(virt-v2v) conversion process.
- C(sparse) - Specifies the disk allocation policy of the resulting
virtual machine. I(true) for sparse, I(false) for preallocated.
Default value is I(true).
- C(storage_domain) - Specifies the target storage domain for
converted disks. This is required parameter.
version_added: "2.3"
kvm:
description:
- Dictionary of values to be used to connect to kvm and import
a virtual machine to oVirt.
- Dictionary can contain following values.
- C(name) - The name of the KVM virtual machine.
- C(username) - The username to authenticate against the KVM.
- C(password) - The password to authenticate against the KVM.
- C(url) - The URL to be passed to the I(virt-v2v) tool for conversion.
For example I(qemu:///system). This is required parameter.
- C(drivers_iso) - The name of the ISO containing drivers that can
be used during the I(virt-v2v) conversion process.
- C(sparse) - Specifies the disk allocation policy of the resulting
virtual machine. I(true) for sparse, I(false) for preallocated.
Default value is I(true).
- C(storage_domain) - Specifies the target storage domain for
converted disks. This is required parameter.
version_added: "2.3"
cpu_mode:
description:
- "CPU mode of the virtual machine. It can be some of the following: I(host_passthrough), I(host_model) or I(custom)."
- "For I(host_passthrough) CPU type you need to set C(placement_policy) to I(pinned)."
- "If no value is passed, default value is set by oVirt/RHV engine."
version_added: "2.5"
placement_policy:
description:
- "The configuration of the virtual machine's placement policy."
- "Placement policy can be one of the following values:"
- "C(migratable) - Allow manual and automatic migration."
- "C(pinned) - Do not allow migration."
- "C(user_migratable) - Allow manual migration only."
- "If no value is passed, default value is set by oVirt/RHV engine."
version_added: "2.5"
ticket:
description:
- "If I(true), in addition return I(remote_vv_file) inside I(vm) dictionary, which contains compatible
content for remote-viewer application. Works only C(state) is I(running)."
version_added: "2.7"
type: bool
cpu_pinning:
description:
- "CPU Pinning topology to map virtual machine CPU to host CPU."
- "CPU Pinning topology is a list of dictionary which can have following values:"
- "C(cpu) - Number of the host CPU."
- "C(vcpu) - Number of the virtual machine CPU."
version_added: "2.5"
soundcard_enabled:
description:
- "If I(true), the sound card is added to the virtual machine."
type: bool
version_added: "2.5"
smartcard_enabled:
description:
- "If I(true), use smart card authentication."
type: bool
version_added: "2.5"
io_threads:
description:
- "Number of IO threads used by virtual machine. I(0) means IO threading disabled."
version_added: "2.5"
ballooning_enabled:
description:
- "If I(true), use memory ballooning."
- "Memory balloon is a guest device, which may be used to re-distribute / reclaim the host memory
based on VM needs in a dynamic way. In this way it's possible to create memory over commitment states."
type: bool
version_added: "2.5"
numa_tune_mode:
description:
- "Set how the memory allocation for NUMA nodes of this VM is applied (relevant if NUMA nodes are set for this VM)."
- "It can be one of the following: I(interleave), I(preferred) or I(strict)."
- "If no value is passed, default value is set by oVirt/RHV engine."
choices: ['interleave', 'preferred', 'strict']
version_added: "2.6"
numa_nodes:
description:
- "List of vNUMA Nodes to set for this VM and pin them to assigned host's physical NUMA node."
- "Each vNUMA node is described by following dictionary:"
- "C(index) - The index of this NUMA node (mandatory)."
- "C(memory) - Memory size of the NUMA node in MiB (mandatory)."
- "C(cores) - list of VM CPU cores indexes to be included in this NUMA node (mandatory)."
- "C(numa_node_pins) - list of physical NUMA node indexes to pin this virtual NUMA node to."
version_added: "2.6"
rng_device:
description:
- "Random number generator (RNG). You can choose of one the following devices I(urandom), I(random) or I(hwrng)."
- "In order to select I(hwrng), you must have it enabled on cluster first."
- "/dev/urandom is used for cluster version >= 4.1, and /dev/random for cluster version <= 4.0"
version_added: "2.5"
custom_properties:
description:
- "Properties sent to VDSM to configure various hooks."
- "Custom properties is a list of dictionary which can have following values:"
- "C(name) - Name of the custom property. For example: I(hugepages), I(vhost), I(sap_agent), etc."
- "C(regexp) - Regular expression to set for custom property."
- "C(value) - Value to set for custom property."
version_added: "2.5"
watchdog:
description:
- "Assign watchdog device for the virtual machine."
- "Watchdogs is a dictionary which can have following values:"
- "C(model) - Model of the watchdog device. For example: I(i6300esb), I(diag288) or I(null)."
- "C(action) - Watchdog action to be performed when watchdog is triggered. For example: I(none), I(reset), I(poweroff), I(pause) or I(dump)."
version_added: "2.5"
graphical_console:
description:
- "Assign graphical console to the virtual machine."
- "Graphical console is a dictionary which can have following values:"
- "C(headless_mode) - If I(true) disable the graphics console for this virtual machine."
- "C(protocol) - Graphical protocol, a list of I(spice), I(vnc), or both."
version_added: "2.5"
exclusive:
description:
- "When C(state) is I(exported) this parameter indicates if the existing VM with the
same name should be overwritten."
version_added: "2.8"
type: bool
export_domain:
description:
- "When C(state) is I(exported)this parameter specifies the name of the export storage domain."
version_added: "2.8"
export_ova:
description:
- Dictionary of values to be used to export VM as OVA.
- C(host) - The name of the destination host where the OVA has to be exported.
- C(directory) - The name of the directory where the OVA has to be exported.
- C(filename) - The name of the exported OVA file.
version_added: "2.8"
notes:
- If VM is in I(UNASSIGNED) or I(UNKNOWN) state before any operation, the module will fail.
If VM is in I(IMAGE_LOCKED) state before any operation, we try to wait for VM to be I(DOWN).
If VM is in I(SAVING_STATE) state before any operation, we try to wait for VM to be I(SUSPENDED).
If VM is in I(POWERING_DOWN) state before any operation, we try to wait for VM to be I(UP) or I(DOWN). VM can
get into I(UP) state from I(POWERING_DOWN) state, when there is no ACPI or guest agent running inside VM, or
if the shutdown operation fails.
      When the user specifies the I(UP) C(state), we always wait for the VM to be in the I(UP) state in case the VM is I(MIGRATING),
      I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE) or I(WAIT_FOR_LAUNCH). In other states we run the start operation on the VM.
      When the user specifies the I(stopped) C(state) and passes the C(force) parameter set to I(true), we forcibly stop the VM in
      any state. If the user doesn't pass the C(force) parameter, we always wait for the VM to be in the UP state in case the VM is
      I(MIGRATING), I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE) or I(WAIT_FOR_LAUNCH). If the VM is in the I(PAUSED) or
      I(SUSPENDED) state, we start the VM. Then we gracefully shut down the VM.
      When the user specifies the I(suspended) C(state), we always wait for the VM to be in the UP state in case the VM is I(MIGRATING),
      I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE) or I(WAIT_FOR_LAUNCH). If the VM is in the I(PAUSED) or I(DOWN) state,
      we start the VM. Then we suspend the VM.
      When the user specifies the I(absent) C(state), we forcibly stop the VM in any state and remove it.
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
- name: Creates a new Virtual Machine from template named 'rhel7_template'
ovirt_vm:
state: present
name: myvm
template: rhel7_template
cluster: mycluster
- name: Register VM
ovirt_vm:
state: registered
storage_domain: mystorage
cluster: mycluster
name: myvm
- name: Register VM using id
ovirt_vm:
state: registered
storage_domain: mystorage
cluster: mycluster
id: 1111-1111-1111-1111
- name: Register VM, allowing partial import
ovirt_vm:
state: registered
storage_domain: mystorage
allow_partial_import: "True"
cluster: mycluster
id: 1111-1111-1111-1111
- name: Register VM with vnic profile mappings and reassign bad macs
ovirt_vm:
state: registered
storage_domain: mystorage
cluster: mycluster
id: 1111-1111-1111-1111
vnic_profile_mappings:
- source_network_name: mynetwork
source_profile_name: mynetwork
target_profile_id: 3333-3333-3333-3333
- source_network_name: mynetwork2
source_profile_name: mynetwork2
target_profile_id: 4444-4444-4444-4444
reassign_bad_macs: "True"
- name: Register VM with mappings
ovirt_vm:
state: registered
storage_domain: mystorage
cluster: mycluster
id: 1111-1111-1111-1111
role_mappings:
- source_name: Role_A
dest_name: Role_B
domain_mappings:
- source_name: Domain_A
dest_name: Domain_B
lun_mappings:
- source_storage_type: iscsi
source_logical_unit_id: 1IET_000d0001
source_logical_unit_port: 3260
source_logical_unit_portal: 1
source_logical_unit_address: 10.34.63.203
source_logical_unit_target: iqn.2016-08-09.brq.str-01:omachace
dest_storage_type: iscsi
dest_logical_unit_id: 1IET_000d0002
dest_logical_unit_port: 3260
dest_logical_unit_portal: 1
dest_logical_unit_address: 10.34.63.204
dest_logical_unit_target: iqn.2016-08-09.brq.str-02:omachace
affinity_group_mappings:
- source_name: Affinity_A
dest_name: Affinity_B
affinity_label_mappings:
- source_name: Label_A
dest_name: Label_B
cluster_mappings:
- source_name: cluster_A
dest_name: cluster_B
- name: Creates a stateless VM which will always use latest template version
ovirt_vm:
name: myvm
template: rhel7
cluster: mycluster
use_latest_template_version: true
# Creates a new server rhel7 Virtual Machine from Blank template
# on brq01 cluster with 2GiB memory and 2 vcpu cores/sockets
# and attach bootable disk with name rhel7_disk and attach virtio NIC
- ovirt_vm:
state: present
cluster: brq01
name: myvm
memory: 2GiB
cpu_cores: 2
cpu_sockets: 2
cpu_shares: 1024
type: server
operating_system: rhel_7x64
disks:
- name: rhel7_disk
bootable: True
nics:
- name: nic1
# Change VM Name
- ovirt_vm:
id: 00000000-0000-0000-0000-000000000000
name: "new_vm_name"
- name: Run VM with cloud init
ovirt_vm:
name: rhel7
template: rhel7
cluster: Default
memory: 1GiB
high_availability: true
high_availability_priority: 50 # Available from Ansible 2.5
cloud_init:
nic_boot_protocol: static
nic_ip_address: 10.34.60.86
nic_netmask: 255.255.252.0
nic_gateway: 10.34.63.254
nic_name: eth1
nic_on_boot: true
host_name: example.com
custom_script: |
write_files:
- content: |
Hello, world!
path: /tmp/greeting.txt
permissions: '0644'
user_name: root
root_password: super_password
- name: Run VM with cloud init, with multiple network interfaces
ovirt_vm:
name: rhel7_4
template: rhel7
cluster: mycluster
cloud_init_nics:
- nic_name: eth0
nic_boot_protocol: dhcp
nic_on_boot: true
- nic_name: eth1
nic_boot_protocol: static
nic_ip_address: 10.34.60.86
nic_netmask: 255.255.252.0
nic_gateway: 10.34.63.254
nic_on_boot: true
- name: Run VM with sysprep
ovirt_vm:
name: windows2012R2_AD
template: windows2012R2
cluster: Default
memory: 3GiB
high_availability: true
sysprep:
host_name: windowsad.example.com
user_name: Administrator
root_password: SuperPassword123
- name: Migrate/Run VM to/on host named 'host1'
ovirt_vm:
state: running
name: myvm
host: host1
- name: Change VMs CD
ovirt_vm:
name: myvm
cd_iso: drivers.iso
- name: Eject VMs CD
ovirt_vm:
name: myvm
cd_iso: ''
- name: Boot VM from CD
ovirt_vm:
name: myvm
cd_iso: centos7_x64.iso
boot_devices:
- cdrom
- name: Stop vm
ovirt_vm:
state: stopped
name: myvm
- name: Upgrade memory to already created VM
ovirt_vm:
name: myvm
memory: 4GiB
- name: Hot plug memory to already created and running VM (VM won't be restarted)
ovirt_vm:
name: myvm
memory: 4GiB
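# An illustrative example of the memory_guaranteed and memory_max options
# documented above; the sizes are placeholders:
- name: Set guaranteed and maximum memory of the VM
  ovirt_vm:
    name: myvm
    memory: 2GiB
    memory_guaranteed: 2GiB
    memory_max: 4GiB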
# Create/update a VM to run with two vNUMA nodes and pin them to physical NUMA nodes as follows:
# vnuma index 0-> numa index 0, vnuma index 1-> numa index 1
- name: Create a VM to run with two vNUMA nodes
ovirt_vm:
name: myvm
cluster: mycluster
numa_tune_mode: "interleave"
numa_nodes:
- index: 0
cores: [0]
memory: 20
numa_node_pins: [0]
- index: 1
cores: [1]
memory: 30
numa_node_pins: [1]
- name: Update an existing VM to run without previously created vNUMA nodes (i.e. remove all vNUMA nodes+NUMA pinning setting)
ovirt_vm:
name: myvm
cluster: mycluster
state: "present"
numa_tune_mode: "interleave"
numa_nodes:
- index: -1
# When a change to the VM requires a restart, use the next_run state.
# The VM will be updated and rebooted if there are any changes.
# If the present state were used, the VM wouldn't be restarted.
- ovirt_vm:
state: next_run
name: myvm
boot_devices:
- network
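# An illustrative sketch of direct kernel boot using the kernel_path,
# initrd_path and kernel_params options documented above; the paths are
# placeholders and must point to images on the ISO domain or the host:
- name: Boot VM with a custom kernel
  ovirt_vm:
    name: myvm
    kernel_path: /kernel/vmlinuz
    initrd_path: /kernel/initrd.img
    kernel_params: console=ttyS0 root=/dev/sda1
    kernel_params_persist: true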
- name: Import virtual machine from VMware
ovirt_vm:
state: stopped
cluster: mycluster
name: vmware_win10
timeout: 1800
poll_interval: 30
vmware:
url: vpx://user@1.2.3.4/Folder1/Cluster1/2.3.4.5?no_verify=1
name: windows10
storage_domain: mynfs
username: user
password: password
- name: Create vm from template and create all disks on specific storage domain
ovirt_vm:
name: vm_test
cluster: mycluster
template: mytemplate
storage_domain: mynfs
nics:
- name: nic1
- name: Remove VM, if VM is running it will be stopped
ovirt_vm:
state: absent
name: myvm
# Defining a specific quota for a VM:
# Since Ansible 2.5
- ovirt_quotas_facts:
data_center: Default
name: myquota
- ovirt_vm:
name: myvm
sso: False
boot_menu: True
usb_support: True
serial_console: True
quota_id: "{{ ovirt_quotas[0]['id'] }}"
- name: Create a VM that has the console configured for both Spice and VNC
ovirt_vm:
name: myvm
template: mytemplate
cluster: mycluster
graphical_console:
protocol:
- spice
- vnc
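# An illustrative sketch combining the watchdog and custom_properties options
# documented above; the property values are placeholders:
- name: Create a VM with a watchdog device and a custom property
  ovirt_vm:
    name: myvm
    cluster: mycluster
    watchdog:
      model: i6300esb
      action: reset
    custom_properties:
      - name: sap_agent
        value: 'true'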
# Execute remote viewer on the VM
- block:
- name: Create a ticket for console for a running VM
      ovirt_vm:
name: myvm
ticket: true
state: running
register: myvm
- name: Save ticket to file
copy:
content: "{{ myvm.vm.remote_vv_file }}"
dest: ~/vvfile.vv
- name: Run remote viewer with file
command: remote-viewer ~/vvfile.vv
# Default value of host_device state is present
- name: Attach host devices to virtual machine
ovirt_vm:
name: myvm
host: myhost
placement_policy: pinned
host_devices:
- name: pci_0000_00_06_0
- name: pci_0000_00_07_0
state: absent
- name: pci_0000_00_08_0
state: present
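# An illustrative sketch of CPU pinning; as noted in the option documentation,
# host_passthrough CPU mode requires the pinned placement policy:
- name: Run VM pinned to host CPUs
  ovirt_vm:
    name: myvm
    host: myhost
    placement_policy: pinned
    cpu_mode: host_passthrough
    cpu_pinning:
      - cpu: 0
        vcpu: 0
      - cpu: 1
        vcpu: 1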
- name: Export the VM as OVA
ovirt_vm:
name: myvm
state: exported
cluster: mycluster
export_ova:
host: myhost
filename: myvm.ova
directory: /tmp/
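# An illustrative sketch of importing a VM from KVM, based on the kvm option
# documented above; names and URL are placeholders:
- name: Import virtual machine from KVM
  ovirt_vm:
    state: stopped
    cluster: mycluster
    name: myvm_kvm
    kvm:
      name: myvm
      url: qemu:///system
      storage_domain: mynfs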
'''
RETURN = '''
id:
description: ID of the VM which is managed
returned: On success if VM is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
vm:
description: "Dictionary of all the VM attributes. VM attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm.
                  Additionally, when the user sets ticket=true, this module will also return the remote_vv_file
                  parameter in the vm dictionary, which contains a remote-viewer compatible file to open the virtual
                  machine console. Please note that this file contains sensitive information."
returned: On success if VM is found.
type: dict
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_params,
check_sdk,
convert_to_bytes,
create_connection,
equal,
get_dict_of_struct,
get_entity,
get_link_name,
get_id_by_name,
ovirt_full_argument_spec,
search_by_attributes,
search_by_name,
wait,
)
class VmsModule(BaseModule):
def __init__(self, *args, **kwargs):
super(VmsModule, self).__init__(*args, **kwargs)
self._initialization = None
self._is_new = False
def __get_template_with_version(self):
"""
oVirt/RHV in version 4.1 doesn't support search by template+version_number,
        so we need to list all templates with the specific name and then iterate
        through its versions until we find the one we are looking for.
"""
template = None
templates_service = self._connection.system_service().templates_service()
if self.param('template'):
templates = templates_service.list(
search='name=%s and cluster=%s' % (self.param('template'), self.param('cluster'))
)
if self.param('template_version'):
templates = [
t for t in templates
if t.version.version_number == self.param('template_version')
]
if not templates:
raise ValueError(
"Template with name '%s' and version '%s' in cluster '%s' was not found'" % (
self.param('template'),
self.param('template_version'),
self.param('cluster')
)
)
template = sorted(templates, key=lambda t: t.version.version_number, reverse=True)[0]
elif self._is_new:
# If template isn't specified and VM is about to be created specify default template:
template = templates_service.template_service('00000000-0000-0000-0000-000000000000').get()
return template
def __get_storage_domain_and_all_template_disks(self, template):
if self.param('template') is None:
return None
if self.param('storage_domain') is None:
return None
disks = list()
for att in self._connection.follow_link(template.disk_attachments):
disks.append(
otypes.DiskAttachment(
disk=otypes.Disk(
id=att.disk.id,
format=otypes.DiskFormat(self.param('disk_format')),
storage_domains=[
otypes.StorageDomain(
id=get_id_by_name(
self._connection.system_service().storage_domains_service(),
self.param('storage_domain')
)
)
]
)
)
)
return disks
def build_entity(self):
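        # Build the otypes.Vm object from the module parameters; parameters the
        # user did not pass are left as None so the engine defaults are kept.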
template = self.__get_template_with_version()
disk_attachments = self.__get_storage_domain_and_all_template_disks(template)
return otypes.Vm(
id=self.param('id'),
name=self.param('name'),
cluster=otypes.Cluster(
name=self.param('cluster')
) if self.param('cluster') else None,
disk_attachments=disk_attachments,
template=otypes.Template(
id=template.id,
) if template else None,
use_latest_template_version=self.param('use_latest_template_version'),
stateless=self.param('stateless') or self.param('use_latest_template_version'),
delete_protected=self.param('delete_protected'),
bios=(
otypes.Bios(boot_menu=otypes.BootMenu(enabled=self.param('boot_menu')))
) if self.param('boot_menu') is not None else None,
console=(
otypes.Console(enabled=self.param('serial_console'))
) if self.param('serial_console') is not None else None,
usb=(
otypes.Usb(enabled=self.param('usb_support'))
) if self.param('usb_support') is not None else None,
sso=(
otypes.Sso(
methods=[otypes.Method(id=otypes.SsoMethod.GUEST_AGENT)] if self.param('sso') else []
)
) if self.param('sso') is not None else None,
quota=otypes.Quota(id=self._module.params.get('quota_id')) if self.param('quota_id') is not None else None,
high_availability=otypes.HighAvailability(
enabled=self.param('high_availability'),
priority=self.param('high_availability_priority'),
) if self.param('high_availability') is not None or self.param('high_availability_priority') else None,
lease=otypes.StorageDomainLease(
storage_domain=otypes.StorageDomain(
id=get_id_by_name(
service=self._connection.system_service().storage_domains_service(),
name=self.param('lease')
)
)
) if self.param('lease') is not None else None,
cpu=otypes.Cpu(
topology=otypes.CpuTopology(
cores=self.param('cpu_cores'),
sockets=self.param('cpu_sockets'),
threads=self.param('cpu_threads'),
) if any((
self.param('cpu_cores'),
self.param('cpu_sockets'),
self.param('cpu_threads')
)) else None,
cpu_tune=otypes.CpuTune(
vcpu_pins=[
otypes.VcpuPin(vcpu=int(pin['vcpu']), cpu_set=str(pin['cpu'])) for pin in self.param('cpu_pinning')
],
) if self.param('cpu_pinning') else None,
mode=otypes.CpuMode(self.param('cpu_mode')) if self.param('cpu_mode') else None,
) if any((
self.param('cpu_cores'),
self.param('cpu_sockets'),
self.param('cpu_threads'),
self.param('cpu_mode'),
self.param('cpu_pinning')
)) else None,
cpu_shares=self.param('cpu_shares'),
os=otypes.OperatingSystem(
type=self.param('operating_system'),
boot=otypes.Boot(
devices=[
otypes.BootDevice(dev) for dev in self.param('boot_devices')
],
) if self.param('boot_devices') else None,
cmdline=self.param('kernel_params') if self.param('kernel_params_persist') else None,
initrd=self.param('initrd_path') if self.param('kernel_params_persist') else None,
kernel=self.param('kernel_path') if self.param('kernel_params_persist') else None,
) if (
self.param('operating_system') or self.param('boot_devices') or self.param('kernel_params_persist')
) else None,
type=otypes.VmType(
self.param('type')
) if self.param('type') else None,
memory=convert_to_bytes(
self.param('memory')
) if self.param('memory') else None,
memory_policy=otypes.MemoryPolicy(
guaranteed=convert_to_bytes(self.param('memory_guaranteed')),
ballooning=self.param('ballooning_enabled'),
max=convert_to_bytes(self.param('memory_max')),
) if any((
self.param('memory_guaranteed'),
self.param('ballooning_enabled') is not None,
self.param('memory_max')
)) else None,
instance_type=otypes.InstanceType(
id=get_id_by_name(
self._connection.system_service().instance_types_service(),
self.param('instance_type'),
),
) if self.param('instance_type') else None,
custom_compatibility_version=otypes.Version(
major=self._get_major(self.param('custom_compatibility_version')),
minor=self._get_minor(self.param('custom_compatibility_version')),
) if self.param('custom_compatibility_version') is not None else None,
description=self.param('description'),
comment=self.param('comment'),
time_zone=otypes.TimeZone(
name=self.param('timezone'),
) if self.param('timezone') else None,
serial_number=otypes.SerialNumber(
policy=otypes.SerialNumberPolicy(self.param('serial_policy')),
value=self.param('serial_policy_value'),
) if (
self.param('serial_policy') is not None or
self.param('serial_policy_value') is not None
) else None,
placement_policy=otypes.VmPlacementPolicy(
affinity=otypes.VmAffinity(self.param('placement_policy')),
hosts=[
otypes.Host(name=self.param('host')),
] if self.param('host') else None,
) if self.param('placement_policy') else None,
soundcard_enabled=self.param('soundcard_enabled'),
display=otypes.Display(
smartcard_enabled=self.param('smartcard_enabled')
) if self.param('smartcard_enabled') is not None else None,
io=otypes.Io(
threads=self.param('io_threads'),
) if self.param('io_threads') is not None else None,
numa_tune_mode=otypes.NumaTuneMode(
self.param('numa_tune_mode')
) if self.param('numa_tune_mode') else None,
rng_device=otypes.RngDevice(
source=otypes.RngSource(self.param('rng_device')),
) if self.param('rng_device') else None,
custom_properties=[
otypes.CustomProperty(
name=cp.get('name'),
regexp=cp.get('regexp'),
value=str(cp.get('value')),
) for cp in self.param('custom_properties') if cp
] if self.param('custom_properties') is not None else None,
initialization=self.get_initialization() if self.param('cloud_init_persist') else None,
)
def _get_export_domain_service(self):
provider_name = self._module.params['export_domain']
export_sds_service = self._connection.system_service().storage_domains_service()
export_sd_id = get_id_by_name(export_sds_service, provider_name)
return export_sds_service.service(export_sd_id)
def post_export_action(self, entity):
self._service = self._get_export_domain_service().vms_service()
def update_check(self, entity):
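        # Returns True when the existing VM (and its next-run configuration,
        # if one exists) already matches the requested parameters.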
res = self._update_check(entity)
if entity.next_run_configuration_exists:
res = res and self._update_check(self._service.service(entity.id).get(next_run=True))
return res
def _update_check(self, entity):
def check_cpu_pinning():
if self.param('cpu_pinning'):
current = []
if entity.cpu.cpu_tune:
current = [(str(pin.cpu_set), int(pin.vcpu)) for pin in entity.cpu.cpu_tune.vcpu_pins]
passed = [(str(pin['cpu']), int(pin['vcpu'])) for pin in self.param('cpu_pinning')]
return sorted(current) == sorted(passed)
return True
def check_custom_properties():
if self.param('custom_properties'):
current = []
if entity.custom_properties:
current = [(cp.name, cp.regexp, str(cp.value)) for cp in entity.custom_properties]
passed = [(cp.get('name'), cp.get('regexp'), str(cp.get('value'))) for cp in self.param('custom_properties') if cp]
return sorted(current) == sorted(passed)
return True
def check_host():
if self.param('host') is not None:
return self.param('host') in [self._connection.follow_link(host).name for host in getattr(entity.placement_policy, 'hosts', None) or []]
return True
def check_custom_compatibility_version():
if self.param('custom_compatibility_version') is not None:
return (self._get_minor(self.param('custom_compatibility_version')) == self._get_minor(entity.custom_compatibility_version) and
self._get_major(self.param('custom_compatibility_version')) == self._get_major(entity.custom_compatibility_version))
return True
cpu_mode = getattr(entity.cpu, 'mode')
vm_display = entity.display
return (
check_cpu_pinning() and
check_custom_properties() and
check_host() and
check_custom_compatibility_version() and
not self.param('cloud_init_persist') and
not self.param('kernel_params_persist') and
equal(self.param('cluster'), get_link_name(self._connection, entity.cluster)) and equal(convert_to_bytes(self.param('memory')), entity.memory) and
equal(convert_to_bytes(self.param('memory_guaranteed')), entity.memory_policy.guaranteed) and
equal(convert_to_bytes(self.param('memory_max')), entity.memory_policy.max) and
equal(self.param('cpu_cores'), entity.cpu.topology.cores) and
equal(self.param('cpu_sockets'), entity.cpu.topology.sockets) and
equal(self.param('cpu_threads'), entity.cpu.topology.threads) and
equal(self.param('cpu_mode'), str(cpu_mode) if cpu_mode else None) and
equal(self.param('type'), str(entity.type)) and
equal(self.param('name'), str(entity.name)) and
equal(self.param('operating_system'), str(entity.os.type)) and
equal(self.param('boot_menu'), entity.bios.boot_menu.enabled) and
equal(self.param('soundcard_enabled'), entity.soundcard_enabled) and
equal(self.param('smartcard_enabled'), getattr(vm_display, 'smartcard_enabled', False)) and
equal(self.param('io_threads'), entity.io.threads) and
equal(self.param('ballooning_enabled'), entity.memory_policy.ballooning) and
equal(self.param('serial_console'), getattr(entity.console, 'enabled', None)) and
equal(self.param('usb_support'), entity.usb.enabled) and
equal(self.param('sso'), True if entity.sso.methods else False) and
equal(self.param('quota_id'), getattr(entity.quota, 'id', None)) and
equal(self.param('high_availability'), entity.high_availability.enabled) and
equal(self.param('high_availability_priority'), entity.high_availability.priority) and
equal(self.param('lease'), get_link_name(self._connection, getattr(entity.lease, 'storage_domain', None))) and
equal(self.param('stateless'), entity.stateless) and
equal(self.param('cpu_shares'), entity.cpu_shares) and
equal(self.param('delete_protected'), entity.delete_protected) and
equal(self.param('use_latest_template_version'), entity.use_latest_template_version) and
equal(self.param('boot_devices'), [str(dev) for dev in getattr(entity.os.boot, 'devices', [])]) and
equal(self.param('instance_type'), get_link_name(self._connection, entity.instance_type), ignore_case=True) and
equal(self.param('description'), entity.description) and
equal(self.param('comment'), entity.comment) and
equal(self.param('timezone'), getattr(entity.time_zone, 'name', None)) and
equal(self.param('serial_policy'), str(getattr(entity.serial_number, 'policy', None))) and
equal(self.param('serial_policy_value'), getattr(entity.serial_number, 'value', None)) and
equal(self.param('placement_policy'), str(entity.placement_policy.affinity) if entity.placement_policy else None) and
equal(self.param('numa_tune_mode'), str(entity.numa_tune_mode)) and
equal(self.param('rng_device'), str(entity.rng_device.source) if entity.rng_device else None)
)
def pre_create(self, entity):
# Mark if entity exists before touching it:
if entity is None:
self._is_new = True
def post_update(self, entity):
self.post_present(entity.id)
def post_present(self, entity_id):
# After creation of the VM, attach disks and NICs:
entity = self._service.service(entity_id).get()
self.__attach_disks(entity)
self.__attach_nics(entity)
self._attach_cd(entity)
self.changed = self.__attach_numa_nodes(entity)
self.changed = self.__attach_watchdog(entity)
self.changed = self.__attach_graphical_console(entity)
self.changed = self.__attach_host_devices(entity)
def pre_remove(self, entity):
# Forcibly stop the VM, if it's not in DOWN state:
if entity.status != otypes.VmStatus.DOWN:
if not self._module.check_mode:
self.changed = self.action(
action='stop',
action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
wait_condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
)['changed']
def __suspend_shutdown_common(self, vm_service):
if vm_service.get().status in [
otypes.VmStatus.MIGRATING,
otypes.VmStatus.POWERING_UP,
otypes.VmStatus.REBOOT_IN_PROGRESS,
otypes.VmStatus.WAIT_FOR_LAUNCH,
otypes.VmStatus.UP,
otypes.VmStatus.RESTORING_STATE,
]:
self._wait_for_UP(vm_service)
def _pre_shutdown_action(self, entity):
vm_service = self._service.vm_service(entity.id)
self.__suspend_shutdown_common(vm_service)
if entity.status in [otypes.VmStatus.SUSPENDED, otypes.VmStatus.PAUSED]:
vm_service.start()
self._wait_for_UP(vm_service)
return vm_service.get()
def _pre_suspend_action(self, entity):
vm_service = self._service.vm_service(entity.id)
self.__suspend_shutdown_common(vm_service)
if entity.status in [otypes.VmStatus.PAUSED, otypes.VmStatus.DOWN]:
vm_service.start()
self._wait_for_UP(vm_service)
return vm_service.get()
def _post_start_action(self, entity):
vm_service = self._service.service(entity.id)
self._wait_for_UP(vm_service)
self._attach_cd(vm_service.get())
self._migrate_vm(vm_service.get())
def _attach_cd(self, entity):
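        # Attach or eject the CD ISO. For a running VM with state=running the
        # change is applied to the current (non-persistent) configuration.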
cd_iso = self.param('cd_iso')
if cd_iso is not None:
vm_service = self._service.service(entity.id)
current = vm_service.get().status == otypes.VmStatus.UP and self.param('state') == 'running'
cdroms_service = vm_service.cdroms_service()
cdrom_device = cdroms_service.list()[0]
cdrom_service = cdroms_service.cdrom_service(cdrom_device.id)
cdrom = cdrom_service.get(current=current)
if getattr(cdrom.file, 'id', '') != cd_iso:
if not self._module.check_mode:
cdrom_service.update(
cdrom=otypes.Cdrom(
file=otypes.File(id=cd_iso)
),
current=current,
)
self.changed = True
return entity
def _migrate_vm(self, entity):
vm_host = self.param('host')
vm_service = self._service.vm_service(entity.id)
if vm_host is not None:
            # The VM can be migrated only when it is already UP:
if entity.status == otypes.VmStatus.UP:
hosts_service = self._connection.system_service().hosts_service()
current_vm_host = hosts_service.host_service(entity.host.id).get().name
if vm_host != current_vm_host:
if not self._module.check_mode:
vm_service.migrate(host=otypes.Host(name=vm_host))
self._wait_for_UP(vm_service)
self.changed = True
return entity
def _wait_for_UP(self, vm_service):
wait(
service=vm_service,
condition=lambda vm: vm.status == otypes.VmStatus.UP,
wait=self.param('wait'),
timeout=self.param('timeout'),
)
def _wait_for_vm_disks(self, vm_service):
disks_service = self._connection.system_service().disks_service()
for da in vm_service.disk_attachments_service().list():
disk_service = disks_service.disk_service(da.disk.id)
wait(
service=disk_service,
condition=lambda disk: disk.status == otypes.DiskStatus.OK,
wait=self.param('wait'),
timeout=self.param('timeout'),
)
def wait_for_down(self, vm):
"""
        This function will first wait for the VM to reach the DOWN status.
        For stateless VMs it will then find the active snapshot, wait until its
        state is OK and until the stateless snapshot is removed.
"""
vm_service = self._service.vm_service(vm.id)
wait(
service=vm_service,
condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
wait=self.param('wait'),
timeout=self.param('timeout'),
)
if vm.stateless:
snapshots_service = vm_service.snapshots_service()
snapshots = snapshots_service.list()
snap_active = [
snap for snap in snapshots
if snap.snapshot_type == otypes.SnapshotType.ACTIVE
][0]
snap_stateless = [
snap for snap in snapshots
if snap.snapshot_type == otypes.SnapshotType.STATELESS
]
# Stateless snapshot may be already removed:
if snap_stateless:
"""
                We need to wait for the active snapshot to be removed, as it is the
                current stateless snapshot. Then we need to wait for the stateless
                snapshot to become ready for use, because it will become the new active snapshot.
"""
wait(
service=snapshots_service.snapshot_service(snap_active.id),
condition=lambda snap: snap is None,
wait=self.param('wait'),
timeout=self.param('timeout'),
)
wait(
service=snapshots_service.snapshot_service(snap_stateless[0].id),
condition=lambda snap: snap.snapshot_status == otypes.SnapshotStatus.OK,
wait=self.param('wait'),
timeout=self.param('timeout'),
)
return True
def __attach_graphical_console(self, entity):
graphical_console = self.param('graphical_console')
if not graphical_console:
return False
vm_service = self._service.service(entity.id)
gcs_service = vm_service.graphics_consoles_service()
graphical_consoles = gcs_service.list()
# Remove all graphical consoles if there are any:
if bool(graphical_console.get('headless_mode')):
if not self._module.check_mode:
for gc in graphical_consoles:
gcs_service.console_service(gc.id).remove()
return len(graphical_consoles) > 0
        # If there are no graphical consoles yet, add the requested ones:
protocol = graphical_console.get('protocol')
if isinstance(protocol, str):
protocol = [protocol]
current_protocols = [str(gc.protocol) for gc in graphical_consoles]
if not current_protocols:
if not self._module.check_mode:
for p in protocol:
gcs_service.add(
otypes.GraphicsConsole(
protocol=otypes.GraphicsType(p),
)
)
return True
# Update consoles:
if sorted(protocol) != sorted(current_protocols):
if not self._module.check_mode:
for gc in graphical_consoles:
gcs_service.console_service(gc.id).remove()
for p in protocol:
gcs_service.add(
otypes.GraphicsConsole(
protocol=otypes.GraphicsType(p),
)
)
return True
def __attach_disks(self, entity):
if not self.param('disks'):
return
vm_service = self._service.service(entity.id)
disks_service = self._connection.system_service().disks_service()
disk_attachments_service = vm_service.disk_attachments_service()
self._wait_for_vm_disks(vm_service)
for disk in self.param('disks'):
# If disk ID is not specified, find disk by name:
disk_id = disk.get('id')
if disk_id is None:
disk_id = getattr(
search_by_name(
service=disks_service,
name=disk.get('name')
),
'id',
None
)
# Attach disk to VM:
disk_attachment = disk_attachments_service.attachment_service(disk_id)
if get_entity(disk_attachment) is None:
if not self._module.check_mode:
disk_attachments_service.add(
otypes.DiskAttachment(
disk=otypes.Disk(
id=disk_id,
),
active=disk.get('activate', True),
interface=otypes.DiskInterface(
disk.get('interface', 'virtio')
),
bootable=disk.get('bootable', False),
)
)
self.changed = True
def __get_vnic_profile_id(self, nic):
"""
        Return the vNIC profile ID looked up by its name. Because there can be
        more vNIC profiles with the same name, the cluster is used as an additional filter.
"""
vnics_service = self._connection.system_service().vnic_profiles_service()
clusters_service = self._connection.system_service().clusters_service()
cluster = search_by_name(clusters_service, self.param('cluster'))
profiles = [
profile for profile in vnics_service.list()
if profile.name == nic.get('profile_name')
]
cluster_networks = [
net.id for net in self._connection.follow_link(cluster.networks)
]
try:
return next(
profile.id for profile in profiles
if profile.network.id in cluster_networks
)
except StopIteration:
raise Exception(
"Profile '%s' was not found in cluster '%s'" % (
nic.get('profile_name'),
self.param('cluster')
)
)
def __attach_numa_nodes(self, entity):
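        # Remove any existing virtual NUMA nodes and re-create them from the
        # 'numa_nodes' parameter; returns True when the topology was changed.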
updated = False
numa_nodes_service = self._service.service(entity.id).numa_nodes_service()
if len(self.param('numa_nodes')) > 0:
# Remove all existing virtual numa nodes before adding new ones
existed_numa_nodes = numa_nodes_service.list()
existed_numa_nodes.sort(reverse=len(existed_numa_nodes) > 1 and existed_numa_nodes[1].index > existed_numa_nodes[0].index)
for current_numa_node in existed_numa_nodes:
numa_nodes_service.node_service(current_numa_node.id).remove()
updated = True
for numa_node in self.param('numa_nodes'):
if numa_node is None or numa_node.get('index') is None or numa_node.get('cores') is None or numa_node.get('memory') is None:
continue
numa_nodes_service.add(
otypes.VirtualNumaNode(
index=numa_node.get('index'),
memory=numa_node.get('memory'),
cpu=otypes.Cpu(
cores=[
otypes.Core(
index=core
) for core in numa_node.get('cores')
],
),
numa_node_pins=[
otypes.NumaNodePin(
index=pin
) for pin in numa_node.get('numa_node_pins')
] if numa_node.get('numa_node_pins') is not None else None,
)
)
updated = True
return updated
def __attach_watchdog(self, entity):
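        # Add, update or remove the watchdog device so it matches the
        # 'watchdog' parameter; returns True when a change was made.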
watchdogs_service = self._service.service(entity.id).watchdogs_service()
watchdog = self.param('watchdog')
if watchdog is not None:
current_watchdog = next(iter(watchdogs_service.list()), None)
if watchdog.get('model') is None and current_watchdog:
watchdogs_service.watchdog_service(current_watchdog.id).remove()
return True
elif watchdog.get('model') is not None and current_watchdog is None:
watchdogs_service.add(
otypes.Watchdog(
model=otypes.WatchdogModel(watchdog.get('model').lower()),
action=otypes.WatchdogAction(watchdog.get('action')),
)
)
return True
elif current_watchdog is not None:
if (
str(current_watchdog.model).lower() != watchdog.get('model').lower() or
str(current_watchdog.action).lower() != watchdog.get('action').lower()
):
watchdogs_service.watchdog_service(current_watchdog.id).update(
otypes.Watchdog(
model=otypes.WatchdogModel(watchdog.get('model')),
action=otypes.WatchdogAction(watchdog.get('action')),
)
)
return True
return False
def __attach_nics(self, entity):
# Attach NICs to VM, if specified:
nics_service = self._service.service(entity.id).nics_service()
for nic in self.param('nics'):
if search_by_name(nics_service, nic.get('name')) is None:
if not self._module.check_mode:
nics_service.add(
otypes.Nic(
name=nic.get('name'),
interface=otypes.NicInterface(
nic.get('interface', 'virtio')
),
vnic_profile=otypes.VnicProfile(
id=self.__get_vnic_profile_id(nic),
) if nic.get('profile_name') else None,
mac=otypes.Mac(
address=nic.get('mac_address')
) if nic.get('mac_address') else None,
)
)
self.changed = True
def get_initialization(self):
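        # Build and cache the otypes.Initialization object from either the
        # cloud_init/cloud_init_nics parameters or the sysprep parameters.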
if self._initialization is not None:
return self._initialization
sysprep = self.param('sysprep')
cloud_init = self.param('cloud_init')
cloud_init_nics = self.param('cloud_init_nics') or []
if cloud_init is not None:
cloud_init_nics.append(cloud_init)
if cloud_init or cloud_init_nics:
self._initialization = otypes.Initialization(
nic_configurations=[
otypes.NicConfiguration(
boot_protocol=otypes.BootProtocol(
nic.pop('nic_boot_protocol').lower()
) if nic.get('nic_boot_protocol') else None,
name=nic.pop('nic_name', None),
on_boot=nic.pop('nic_on_boot', None),
ip=otypes.Ip(
address=nic.pop('nic_ip_address', None),
netmask=nic.pop('nic_netmask', None),
gateway=nic.pop('nic_gateway', None),
) if (
nic.get('nic_gateway') is not None or
nic.get('nic_netmask') is not None or
nic.get('nic_ip_address') is not None
) else None,
)
for nic in cloud_init_nics
if (
nic.get('nic_gateway') is not None or
nic.get('nic_netmask') is not None or
nic.get('nic_ip_address') is not None or
nic.get('nic_boot_protocol') is not None or
nic.get('nic_on_boot') is not None
)
] if cloud_init_nics else None,
**cloud_init
)
elif sysprep:
self._initialization = otypes.Initialization(
**sysprep
)
return self._initialization
def __attach_host_devices(self, entity):
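        # Attach or detach pass-through host devices so the VM matches the
        # requested 'host_devices' list; returns True when anything changed.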
vm_service = self._service.service(entity.id)
host_devices_service = vm_service.host_devices_service()
host_devices = self.param('host_devices')
updated = False
if host_devices:
device_names = [dev.name for dev in host_devices_service.list()]
for device in host_devices:
device_name = device.get('name')
state = device.get('state', 'present')
if state == 'absent' and device_name in device_names:
updated = True
if not self._module.check_mode:
device_id = get_id_by_name(host_devices_service, device.get('name'))
host_devices_service.device_service(device_id).remove()
elif state == 'present' and device_name not in device_names:
updated = True
if not self._module.check_mode:
host_devices_service.add(
otypes.HostDevice(
name=device.get('name'),
)
)
return updated
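# The helpers below translate the *_mappings module parameters into the SDK
# registration mapping types used when registering a VM from a storage domain.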
def _get_role_mappings(module):
roleMappings = list()
for roleMapping in module.params['role_mappings']:
roleMappings.append(
otypes.RegistrationRoleMapping(
from_=otypes.Role(
name=roleMapping['source_name'],
) if roleMapping['source_name'] else None,
to=otypes.Role(
name=roleMapping['dest_name'],
) if roleMapping['dest_name'] else None,
)
)
return roleMappings
def _get_affinity_group_mappings(module):
affinityGroupMappings = list()
for affinityGroupMapping in module.params['affinity_group_mappings']:
affinityGroupMappings.append(
otypes.RegistrationAffinityGroupMapping(
from_=otypes.AffinityGroup(
name=affinityGroupMapping['source_name'],
) if affinityGroupMapping['source_name'] else None,
to=otypes.AffinityGroup(
name=affinityGroupMapping['dest_name'],
) if affinityGroupMapping['dest_name'] else None,
)
)
return affinityGroupMappings
def _get_affinity_label_mappings(module):
affinityLabelMappings = list()
for affinityLabelMapping in module.params['affinity_label_mappings']:
affinityLabelMappings.append(
otypes.RegistrationAffinityLabelMapping(
from_=otypes.AffinityLabel(
name=affinityLabelMapping['source_name'],
) if affinityLabelMapping['source_name'] else None,
to=otypes.AffinityLabel(
name=affinityLabelMapping['dest_name'],
) if affinityLabelMapping['dest_name'] else None,
)
)
return affinityLabelMappings
def _get_domain_mappings(module):
domainMappings = list()
for domainMapping in module.params['domain_mappings']:
domainMappings.append(
otypes.RegistrationDomainMapping(
from_=otypes.Domain(
name=domainMapping['source_name'],
) if domainMapping['source_name'] else None,
to=otypes.Domain(
name=domainMapping['dest_name'],
) if domainMapping['dest_name'] else None,
)
)
return domainMappings
def _get_lun_mappings(module):
lunMappings = list()
for lunMapping in module.params['lun_mappings']:
lunMappings.append(
otypes.RegistrationLunMapping(
from_=otypes.Disk(
lun_storage=otypes.HostStorage(
type=otypes.StorageType(lunMapping['source_storage_type'])
if (lunMapping['source_storage_type'] in
['iscsi', 'fcp']) else None,
logical_units=[
otypes.LogicalUnit(
id=lunMapping['source_logical_unit_id'],
)
],
),
) if lunMapping['source_logical_unit_id'] else None,
to=otypes.Disk(
lun_storage=otypes.HostStorage(
type=otypes.StorageType(lunMapping['dest_storage_type'])
if (lunMapping['dest_storage_type'] in
['iscsi', 'fcp']) else None,
logical_units=[
otypes.LogicalUnit(
id=lunMapping['dest_logical_unit_id'],
port=lunMapping['dest_logical_unit_port'],
portal=lunMapping['dest_logical_unit_portal'],
address=lunMapping['dest_logical_unit_address'],
target=lunMapping['dest_logical_unit_target'],
password=lunMapping['dest_logical_unit_password'],
username=lunMapping['dest_logical_unit_username'],
)
],
),
) if lunMapping['dest_logical_unit_id'] else None,
),
        )
return lunMappings
def _get_cluster_mappings(module):
clusterMappings = list()
for clusterMapping in module.params['cluster_mappings']:
clusterMappings.append(
otypes.RegistrationClusterMapping(
from_=otypes.Cluster(
name=clusterMapping['source_name'],
),
to=otypes.Cluster(
name=clusterMapping['dest_name'],
) if clusterMapping['dest_name'] else None,
)
)
return clusterMappings
def _get_vnic_profile_mappings(module):
vnicProfileMappings = list()
for vnicProfileMapping in module.params['vnic_profile_mappings']:
vnicProfileMappings.append(
otypes.VnicProfileMapping(
source_network_name=vnicProfileMapping['source_network_name'],
source_network_profile_name=vnicProfileMapping['source_profile_name'],
target_vnic_profile=otypes.VnicProfile(
id=vnicProfileMapping['target_profile_id'],
) if vnicProfileMapping['target_profile_id'] else None,
)
)
return vnicProfileMappings
def import_vm(module, connection):
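    # Import a VM from an external provider (kvm, xen or vmware) using the
    # engine's external VM import service; returns False when a VM with the
    # requested name already exists.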
vms_service = connection.system_service().vms_service()
if search_by_name(vms_service, module.params['name']) is not None:
return False
events_service = connection.system_service().events_service()
last_event = events_service.list(max=1)[0]
external_type = [
tmp for tmp in ['kvm', 'xen', 'vmware']
if module.params[tmp] is not None
][0]
external_vm = module.params[external_type]
imports_service = connection.system_service().external_vm_imports_service()
imported_vm = imports_service.add(
otypes.ExternalVmImport(
vm=otypes.Vm(
name=module.params['name']
),
name=external_vm.get('name'),
username=external_vm.get('username', 'test'),
password=external_vm.get('password', 'test'),
provider=otypes.ExternalVmProviderType(external_type),
url=external_vm.get('url'),
cluster=otypes.Cluster(
name=module.params['cluster'],
) if module.params['cluster'] else None,
storage_domain=otypes.StorageDomain(
name=external_vm.get('storage_domain'),
) if external_vm.get('storage_domain') else None,
sparse=external_vm.get('sparse', True),
host=otypes.Host(
name=module.params['host'],
) if module.params['host'] else None,
)
)
    # Wait until an event with code 1152 appears for our VM:
vms_service = connection.system_service().vms_service()
wait(
service=vms_service.vm_service(imported_vm.vm.id),
condition=lambda vm: len([
event
for event in events_service.list(
from_=int(last_event.id),
search='type=1152 and vm.id=%s' % vm.id,
)
]) > 0 if vm is not None else False,
fail_condition=lambda vm: vm is None,
timeout=module.params['timeout'],
poll_interval=module.params['poll_interval'],
)
return True
def control_state(vm, vms_service, module):
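    # Move the VM out of transient or invalid states before the requested
    # operation, following the rules described in the notes section above.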
if vm is None:
return
force = module.params['force']
state = module.params['state']
vm_service = vms_service.vm_service(vm.id)
if vm.status == otypes.VmStatus.IMAGE_LOCKED:
wait(
service=vm_service,
condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
)
elif vm.status == otypes.VmStatus.SAVING_STATE:
# Result state is SUSPENDED, we should wait to be suspended:
wait(
service=vm_service,
condition=lambda vm: vm.status == otypes.VmStatus.SUSPENDED,
)
elif (
vm.status == otypes.VmStatus.UNASSIGNED or
vm.status == otypes.VmStatus.UNKNOWN
):
# Invalid states:
module.fail_json(msg="Not possible to control VM, if it's in '{0}' status".format(vm.status))
elif vm.status == otypes.VmStatus.POWERING_DOWN:
if (force and state == 'stopped') or state == 'absent':
vm_service.stop()
wait(
service=vm_service,
condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
)
else:
# If VM is powering down, wait to be DOWN or UP.
# VM can end in UP state in case there is no GA
# or ACPI on the VM or shutdown operation crashed:
wait(
service=vm_service,
condition=lambda vm: vm.status in [otypes.VmStatus.DOWN, otypes.VmStatus.UP],
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(type='str', default='present', choices=['absent', 'next_run', 'present', 'registered', 'running', 'stopped', 'suspended', 'exported']),
name=dict(type='str'),
id=dict(type='str'),
cluster=dict(type='str'),
allow_partial_import=dict(type='bool'),
template=dict(type='str'),
template_version=dict(type='int'),
use_latest_template_version=dict(type='bool'),
storage_domain=dict(type='str'),
disk_format=dict(type='str', default='cow', choices=['cow', 'raw']),
disks=dict(type='list', default=[]),
memory=dict(type='str'),
memory_guaranteed=dict(type='str'),
memory_max=dict(type='str'),
cpu_sockets=dict(type='int'),
cpu_cores=dict(type='int'),
cpu_shares=dict(type='int'),
cpu_threads=dict(type='int'),
type=dict(type='str', choices=['server', 'desktop', 'high_performance']),
operating_system=dict(type='str'),
cd_iso=dict(type='str'),
boot_devices=dict(type='list', choices=['cdrom', 'hd', 'network']),
vnic_profile_mappings=dict(default=[], type='list'),
cluster_mappings=dict(default=[], type='list'),
role_mappings=dict(default=[], type='list'),
affinity_group_mappings=dict(default=[], type='list'),
affinity_label_mappings=dict(default=[], type='list'),
lun_mappings=dict(default=[], type='list'),
domain_mappings=dict(default=[], type='list'),
reassign_bad_macs=dict(default=None, type='bool'),
boot_menu=dict(type='bool'),
serial_console=dict(type='bool'),
usb_support=dict(type='bool'),
sso=dict(type='bool'),
quota_id=dict(type='str'),
high_availability=dict(type='bool'),
high_availability_priority=dict(type='int'),
lease=dict(type='str'),
stateless=dict(type='bool'),
delete_protected=dict(type='bool'),
force=dict(type='bool', default=False),
nics=dict(type='list', default=[]),
cloud_init=dict(type='dict'),
cloud_init_nics=dict(type='list', default=[]),
cloud_init_persist=dict(type='bool', default=False, aliases=['sysprep_persist']),
kernel_params_persist=dict(type='bool', default=False),
sysprep=dict(type='dict'),
host=dict(type='str'),
clone=dict(type='bool', default=False),
clone_permissions=dict(type='bool', default=False),
kernel_path=dict(type='str'),
initrd_path=dict(type='str'),
kernel_params=dict(type='str'),
instance_type=dict(type='str'),
description=dict(type='str'),
comment=dict(type='str'),
timezone=dict(type='str'),
serial_policy=dict(type='str', choices=['vm', 'host', 'custom']),
serial_policy_value=dict(type='str'),
vmware=dict(type='dict'),
xen=dict(type='dict'),
kvm=dict(type='dict'),
cpu_mode=dict(type='str'),
placement_policy=dict(type='str'),
custom_compatibility_version=dict(type='str'),
ticket=dict(type='bool', default=None),
cpu_pinning=dict(type='list'),
soundcard_enabled=dict(type='bool', default=None),
smartcard_enabled=dict(type='bool', default=None),
io_threads=dict(type='int', default=None),
ballooning_enabled=dict(type='bool', default=None),
rng_device=dict(type='str'),
numa_tune_mode=dict(type='str', choices=['interleave', 'preferred', 'strict']),
numa_nodes=dict(type='list', default=[]),
custom_properties=dict(type='list'),
watchdog=dict(type='dict'),
host_devices=dict(type='list'),
graphical_console=dict(type='dict'),
exclusive=dict(type='bool'),
export_domain=dict(default=None),
export_ova=dict(type='dict'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=[['id', 'name']],
)
check_sdk(module)
check_params(module)
try:
state = module.params['state']
auth = module.params.pop('auth')
connection = create_connection(auth)
vms_service = connection.system_service().vms_service()
vms_module = VmsModule(
connection=connection,
module=module,
service=vms_service,
)
vm = vms_module.search_entity(list_params={'all_content': True})
control_state(vm, vms_service, module)
if state in ('present', 'running', 'next_run'):
if module.params['xen'] or module.params['kvm'] or module.params['vmware']:
vms_module.changed = import_vm(module, connection)
# In case the VM doesn't exist, wait for the VM to reach DOWN state;
# otherwise don't wait for any state, just update the VM:
ret = vms_module.create(
entity=vm,
result_state=otypes.VmStatus.DOWN if vm is None else None,
clone=module.params['clone'],
clone_permissions=module.params['clone_permissions'],
)
# If VM is going to be created and check_mode is on, return now:
if module.check_mode and ret.get('id') is None:
module.exit_json(**ret)
vms_module.post_present(ret['id'])
# Run the VM if it was just created, else don't run it:
if state == 'running':
def kernel_persist_check():
# Group the kernel-related parameters explicitly so that the
# 'and not cloud_init_persist' part applies to all of them:
return ((module.params.get('kernel_params') or
module.params.get('initrd_path') or
module.params.get('kernel_path'))
and not module.params.get('cloud_init_persist'))
initialization = vms_module.get_initialization()
ret = vms_module.action(
action='start',
post_action=vms_module._post_start_action,
action_condition=lambda vm: (
vm.status not in [
otypes.VmStatus.MIGRATING,
otypes.VmStatus.POWERING_UP,
otypes.VmStatus.REBOOT_IN_PROGRESS,
otypes.VmStatus.WAIT_FOR_LAUNCH,
otypes.VmStatus.UP,
otypes.VmStatus.RESTORING_STATE,
]
),
wait_condition=lambda vm: vm.status == otypes.VmStatus.UP,
# Start action kwargs:
use_cloud_init=True if not module.params.get('cloud_init_persist') and module.params.get('cloud_init') is not None else None,
use_sysprep=True if not module.params.get('cloud_init_persist') and module.params.get('sysprep') is not None else None,
vm=otypes.Vm(
placement_policy=otypes.VmPlacementPolicy(
hosts=[otypes.Host(name=module.params['host'])]
) if module.params['host'] else None,
initialization=initialization,
os=otypes.OperatingSystem(
cmdline=module.params.get('kernel_params'),
initrd=module.params.get('initrd_path'),
kernel=module.params.get('kernel_path'),
) if (kernel_persist_check()) else None,
) if (
kernel_persist_check() or
module.params.get('host') or
initialization is not None
and not module.params.get('cloud_init_persist')
) else None,
)
if module.params['ticket']:
vm_service = vms_service.vm_service(ret['id'])
graphics_consoles_service = vm_service.graphics_consoles_service()
graphics_console = graphics_consoles_service.list()[0]
console_service = graphics_consoles_service.console_service(graphics_console.id)
ticket = console_service.remote_viewer_connection_file()
if ticket:
ret['vm']['remote_vv_file'] = ticket
if state == 'next_run':
# Apply next run configuration, if needed:
vm = vms_service.vm_service(ret['id']).get()
if vm.next_run_configuration_exists:
ret = vms_module.action(
action='reboot',
entity=vm,
action_condition=lambda vm: vm.status == otypes.VmStatus.UP,
wait_condition=lambda vm: vm.status == otypes.VmStatus.UP,
)
ret['changed'] = vms_module.changed
elif state == 'stopped':
if module.params['xen'] or module.params['kvm'] or module.params['vmware']:
vms_module.changed = import_vm(module, connection)
ret = vms_module.create(
entity=vm,
result_state=otypes.VmStatus.DOWN if vm is None else None,
clone=module.params['clone'],
clone_permissions=module.params['clone_permissions'],
)
if module.params['force']:
ret = vms_module.action(
action='stop',
action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
wait_condition=vms_module.wait_for_down,
)
else:
ret = vms_module.action(
action='shutdown',
pre_action=vms_module._pre_shutdown_action,
action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
wait_condition=vms_module.wait_for_down,
)
vms_module.post_present(ret['id'])
elif state == 'suspended':
|
#!/usr/bin/python -t
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Copyright 2005 Duke University
# Written by Seth Vidal
"""
Command line interface yum class and related.
"""
import os
import re
import sys
import time
import random
import logging
from optparse import OptionParser,OptionGroup,SUPPRESS_HELP
import rpm
from weakref import proxy as weakref
import output
import shell
import yum
import yum.Errors
import yum.logginglevels
import yum.misc
import yum.plugins
from rpmUtils.arch import isMultiLibArch
from yum import _, P_
from yum.rpmtrans import RPMTransaction
import signal
import yumcommands
from yum.i18n import to_unicode, to_utf8, exception2msg
# This is for yum-utils/yumdownloader in RHEL-5, where it isn't importing this
# directly but did do "from cli import *", and we did have this in 3.2.22. I
# just _love_ how python re-exports these by default.
# pylint: disable-msg=W0611
from yum.packages import parsePackages
# pylint: enable-msg=W0611
def sigquit(signum, frame):
"""SIGQUIT handler for the yum cli. This function will print an
error message and exit the program.
:param signum: unused
:param frame: unused
"""
print >> sys.stderr, "Quit signal sent - exiting immediately"
sys.exit(1)
class CliError(yum.Errors.YumBaseError):
"""Command line interface related Exception."""
def __init__(self, args=''):
yum.Errors.YumBaseError.__init__(self)
self.args = args
def sys_inhibit(what, who, why, mode):
""" Tell systemd to inhibit shutdown, via. dbus. """
try:
import dbus
bus = dbus.SystemBus()
proxy = bus.get_object('org.freedesktop.login1',
'/org/freedesktop/login1')
iface = dbus.Interface(proxy, 'org.freedesktop.login1.Manager')
return iface.Inhibit(what, who, why, mode)
except:
return None
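# Illustrative only: doTransaction() below calls this roughly as
#   fd = sys_inhibit('shutdown:idle', 'yum API', 'Running transaction', 'block')
# and the returned dbus file descriptor just has to stay alive for the
# inhibit to be held (or be closed via os.close(fd.take()) to release it).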
class YumBaseCli(yum.YumBase, output.YumOutput):
"""This is the base class for yum cli."""
def __init__(self):
# handle sigquit early on
signal.signal(signal.SIGQUIT, sigquit)
yum.YumBase.__init__(self)
output.YumOutput.__init__(self)
logging.basicConfig()
self.logger = logging.getLogger("yum.cli")
self.verbose_logger = logging.getLogger("yum.verbose.cli")
self.yum_cli_commands = {}
self.use_txmbr_in_callback = True
self.registerCommand(yumcommands.InstallCommand())
self.registerCommand(yumcommands.UpdateCommand())
self.registerCommand(yumcommands.InfoCommand())
self.registerCommand(yumcommands.ListCommand())
self.registerCommand(yumcommands.EraseCommand())
self.registerCommand(yumcommands.AutoremoveCommand())
self.registerCommand(yumcommands.GroupsCommand())
self.registerCommand(yumcommands.MakeCacheCommand())
self.registerCommand(yumcommands.CleanCommand())
self.registerCommand(yumcommands.ProvidesCommand())
self.registerCommand(yumcommands.CheckUpdateCommand())
self.registerCommand(yumcommands.SearchCommand())
self.registerCommand(yumcommands.UpgradeCommand())
self.registerCommand(yumcommands.LocalInstallCommand())
self.registerCommand(yumcommands.ResolveDepCommand())
self.registerCommand(yumcommands.ShellCommand())
self.registerCommand(yumcommands.DepListCommand())
self.registerCommand(yumcommands.RepoListCommand())
self.registerCommand(yumcommands.HelpCommand())
self.registerCommand(yumcommands.ReInstallCommand())
self.registerCommand(yumcommands.DowngradeCommand())
self.registerCommand(yumcommands.VersionCommand())
self.registerCommand(yumcommands.HistoryCommand())
self.registerCommand(yumcommands.CheckRpmdbCommand())
self.registerCommand(yumcommands.DistroSyncCommand())
self.registerCommand(yumcommands.LoadTransactionCommand())
self.registerCommand(yumcommands.SwapCommand())
self.registerCommand(yumcommands.RepoPkgsCommand())
self.registerCommand(yumcommands.UpdateinfoCommand())
self.registerCommand(yumcommands.UpdateMinimalCommand())
self.registerCommand(yumcommands.FSSnapshotCommand())
self.registerCommand(yumcommands.FSCommand())
def registerCommand(self, command):
"""Register a :class:`yumcommands.YumCommand` so that it can be called by
any of the names returned by its
:func:`yumcommands.YumCommand.getNames` method.
:param command: the :class:`yumcommands.YumCommand` to register
"""
for name in command.getNames():
if name in self.yum_cli_commands:
raise yum.Errors.ConfigError(_('Command "%s" already defined') % name)
self.yum_cli_commands[name] = command
def doRepoSetup(self, thisrepo=None, dosack=1):
"""Grab the repomd.xml for each enabled and set up the basics
of the repository.
:param thisrepo: the repository to set up
:param dosack: whether to get the repo sack
"""
if self._repos and thisrepo is None:
return self._repos
if not thisrepo:
self.verbose_logger.log(yum.logginglevels.INFO_2,
_('Setting up repositories'))
# Call parent class to do the bulk of work
# (this also ensures that reposetup plugin hook is called)
if thisrepo:
yum.YumBase._getRepos(self, thisrepo=thisrepo, doSetup=True)
else:
yum.YumBase._getRepos(self, thisrepo=thisrepo)
if dosack: # so we can make the dirs and grab the repomd.xml but not import the md
self.verbose_logger.log(yum.logginglevels.INFO_2,
_('Reading repository metadata in from local files'))
self._getSacks(thisrepo=thisrepo)
return self._repos
def _makeUsage(self):
"""
Format an attractive usage string for yum, listing subcommand
names and summary usages.
"""
usage = 'yum [options] COMMAND\n\nList of Commands:\n\n'
commands = yum.misc.unique([x for x in self.yum_cli_commands.values()
if not (hasattr(x, 'hidden') and x.hidden)])
commands.sort(key=lambda x: x.getNames()[0])
for command in commands:
# XXX Remove this when getSummary is common in plugins
try:
summary = command.getSummary()
usage += "%-14s %s\n" % (command.getNames()[0], summary)
except (AttributeError, NotImplementedError):
usage += "%s\n" % command.getNames()[0]
return usage
def _parseSetOpts(self, setopts):
"""parse the setopts list handed to us and saves the results as
repo_setopts and main_setopts in the yumbase object"""
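# Illustrative only: setopts such as
#   ['debuglevel=5', 'updates.enabled=0']
# end up as main_setopts.debuglevel == '5' and
# repo_setopts['updates'].enabled == '0' (values stay plain strings here).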
repoopts = {}
mainopts = yum.misc.GenericHolder()
mainopts.items = []
bad_setopt_tm = []
bad_setopt_ne = []
for item in setopts:
vals = item.split('=')
if len(vals) > 2:
bad_setopt_tm.append(item)
continue
if len(vals) < 2:
bad_setopt_ne.append(item)
continue
k, v = [i.strip() for i in vals]
period = k.rfind('.')
if period != -1:
repo = k[:period]
k = k[period+1:]
if repo not in repoopts:
repoopts[repo] = yum.misc.GenericHolder()
repoopts[repo].items = []
setattr(repoopts[repo], k, v)
repoopts[repo].items.append(k)
else:
setattr(mainopts, k, v)
mainopts.items.append(k)
self.main_setopts = mainopts
self.repo_setopts = repoopts
return bad_setopt_tm, bad_setopt_ne
def getOptionsConfig(self, args):
"""Parse command line arguments, and set up :attr:`self.conf` and
:attr:`self.cmds`, as well as logger objects in base instance.
:param args: a list of command line arguments
"""
self.optparser = YumOptionParser(base=self, usage=self._makeUsage())
# Parse only command line options that affect basic yum setup
opts = self.optparser.firstParse(args)
# Just print out the version if that's what the user wanted
if opts.version:
print yum.__version__
opts.quiet = True
opts.verbose = False
# go through all the setopts and set the global ones
bad_setopt_tm, bad_setopt_ne = self._parseSetOpts(opts.setopts)
if self.main_setopts:
for opt in self.main_setopts.items:
setattr(opts, opt, getattr(self.main_setopts, opt))
# get the install root to use
root = self.optparser.getRoot(opts)
if opts.quiet:
opts.debuglevel = 0
if opts.verbose:
opts.debuglevel = opts.errorlevel = 6
# Read up configuration options and initialise plugins
try:
pc = self.preconf
pc.fn = opts.conffile
pc.root = root
pc.init_plugins = not opts.noplugins
pc.plugin_types = (yum.plugins.TYPE_CORE,
yum.plugins.TYPE_INTERACTIVE)
pc.optparser = self.optparser
pc.debuglevel = opts.debuglevel
pc.errorlevel = opts.errorlevel
pc.disabled_plugins = self.optparser._splitArg(opts.disableplugins)
pc.enabled_plugins = self.optparser._splitArg(opts.enableplugins)
pc.releasever = opts.releasever
self.conf
for item in bad_setopt_tm:
msg = "Setopt argument has multiple values: %s"
self.logger.warning(msg % item)
for item in bad_setopt_ne:
msg = "Setopt argument has no value: %s"
self.logger.warning(msg % item)
# now set all the non-first-start opts from main from our setopts
if self.main_setopts:
for opt in self.main_setopts.items:
if not hasattr(self.conf, opt):
msg ="Main config did not have a %s attr. before setopt"
self.logger.warning(msg % opt)
setattr(self.conf, opt, getattr(self.main_setopts, opt))
except yum.Errors.ConfigError, e:
self.logger.critical(_('Config error: %s'), e)
sys.exit(1)
except IOError, e:
e = '%s: %s' % (to_unicode(e.args[1]), repr(e.filename))
self.logger.critical(_('Config error: %s'), e)
sys.exit(1)
except ValueError, e:
self.logger.critical(_('Options error: %s'), e)
sys.exit(1)
# update usage in case plugins have added commands
self.optparser.set_usage(self._makeUsage())
self.plugins.run('args', args=args)
# Now parse the command line for real and
# apply some of the options to self.conf
(opts, self.cmds) = self.optparser.setupYumConfig(args=args)
if opts.version:
opts.quiet = True
opts.verbose = False
# Check that firstParse didn't miss anything, and warn the user if it
# did ... because this is really magic, and unexpected.
if opts.quiet:
opts.debuglevel = 0
if opts.verbose:
opts.debuglevel = opts.errorlevel = 6
if opts.debuglevel != pc.debuglevel or opts.errorlevel != pc.errorlevel:
self.logger.warning(_("Ignored option -q, -v, -d or -e (probably due to merging: -yq != -y -q)"))
# getRoot() changes it, but then setupYumConfig() changes it back. So
# don't test for this, if we are using --installroot.
if root == '/' and opts.conffile != pc.fn:
self.logger.warning(_("Ignored option -c (probably due to merging -yc != -y -c)"))
if opts.version:
self.conf.cache = 1
yum_progs = self.run_with_package_names
done = False
def sm_ui_time(x):
return time.strftime("%Y-%m-%d %H:%M", time.gmtime(x))
def sm_ui_date(x): # For changelogs, there is no time
return time.strftime("%Y-%m-%d", time.gmtime(x))
for pkg in sorted(self.rpmdb.returnPackages(patterns=yum_progs)):
# We should only have 1 version of each...
if done: print ""
done = True
if pkg.epoch == '0':
ver = '%s-%s.%s' % (pkg.version, pkg.release, pkg.arch)
else:
ver = '%s:%s-%s.%s' % (pkg.epoch,
pkg.version, pkg.release, pkg.arch)
name = "%s%s%s" % (self.term.MODE['bold'], pkg.name,
self.term.MODE['normal'])
print _(" Installed: %s-%s at %s") %(name, ver,
sm_ui_time(pkg.installtime))
print _(" Built : %s at %s") % (to_unicode(pkg.packager),
sm_ui_time(pkg.buildtime))
print _(" Committed: %s at %s") % (to_unicode(pkg.committer),
sm_ui_date(pkg.committime))
sys.exit(0)
if opts.sleeptime is not None:
sleeptime = random.randrange(opts.sleeptime*60)
else:
sleeptime = 0
# save our original args out
self.args = args
# save out as a nice command string
self.cmdstring = 'yum '
for arg in self.args:
self.cmdstring += '%s ' % arg
try:
self.parseCommands() # before we return check over the base command + args
# make sure they match/make sense
except CliError:
sys.exit(1)
# run the sleep - if it's unchanged then it won't matter
time.sleep(sleeptime)
def parseCommands(self):
"""Read :attr:`self.cmds` and parse them out to make sure that
the requested base command and argument makes any sense at
all. This function will also set :attr:`self.basecmd` and
:attr:`self.extcmds`.
"""
self.verbose_logger.debug('Yum version: %s', yum.__version__)
self.verbose_logger.log(yum.logginglevels.DEBUG_4,
'COMMAND: %s', self.cmdstring)
self.verbose_logger.log(yum.logginglevels.DEBUG_4,
'Installroot: %s', self.conf.installroot)
if len(self.conf.commands) == 0 and len(self.cmds) < 1:
self.cmds = self.conf.commands
else:
self.conf.commands = self.cmds
if len(self.cmds) < 1:
self.logger.critical(_('You need to give some command'))
self.usage()
raise CliError
self.basecmd = self.cmds[0] # our base command
self.extcmds = self.cmds[1:] # our extended arguments/commands
if len(self.extcmds) > 0:
self.verbose_logger.log(yum.logginglevels.DEBUG_4,
'Ext Commands:\n')
for arg in self.extcmds:
self.verbose_logger.log(yum.logginglevels.DEBUG_4, ' %s', arg)
if self.basecmd not in self.yum_cli_commands:
self.logger.critical(_('No such command: %s. Please use %s --help'),
self.basecmd, sys.argv[0])
raise CliError
self._set_repos_cache_req()
self.yum_cli_commands[self.basecmd].doCheck(self, self.basecmd, self.extcmds)
def _set_repos_cache_req(self, warning=True):
""" Set the cacheReq attribute from the commands to the repos. """
cmd = self.yum_cli_commands[self.basecmd]
cacheReq = 'write'
if hasattr(cmd, 'cacheRequirement'):
cacheReq = cmd.cacheRequirement(self, self.basecmd, self.extcmds)
# The main thing we want to do here is that if the user has done a
# "yum makecache fast" or has yum-cron running or something, then try
# not to update the repo. caches ... thus. not turning 0.5s ops. into
# 100x longer ops.
# However if the repos. are not in sync. that's probably not going to
# work well (Eg. user enables updates-testing). Also give a warning if
# they are _really_ old.
ts_min = None
ts_max = None
for repo in self.repos.listEnabled():
try: rts = os.stat(repo.metadata_cookie).st_mtime
except (yum.Errors.RepoError, OSError):
ts_min = None
break
if not ts_min:
ts_min = rts
ts_max = rts
elif rts > ts_max:
ts_max = rts
elif rts < ts_min:
ts_min = rts
if ts_min:
# If caches are within 5 days of each other, they are ok to work
# together (lol, random numbers)...
if (ts_max - ts_min) > (60 * 60 * 24 * 5):
ts_min = None
elif ts_max > time.time():
ts_min = None
if not ts_min:
cacheReq = 'write'
elif warning and (time.time() - ts_max) > (60 * 60 * 24 * 14):
self.logger.warning(_("Repodata is over 2 weeks old. Install yum-cron? Or run: yum makecache fast"))
for repo in self.repos.sort():
repo._metadata_cache_req = cacheReq
def _shell_history_write(self):
if not hasattr(self, '_shell_history_cmds'):
return
if not self._shell_history_cmds:
return
data = self._shell_history_cmds
# Turn: [["a", "b"], ["c", "d"]] => "a b\nc d\n"
data = [" ".join(cmds) for cmds in data]
data.append('')
data = "\n".join(data)
self.history.write_addon_data('shell-cmds', data)
def doShell(self):
"""Run a shell-like interface for yum commands.
:return: a tuple containing the shell result number, and the
shell result messages
"""
yumshell = shell.YumShell(base=self)
# We share this array...
self._shell_history_cmds = yumshell._shell_history_cmds
if len(self.extcmds) == 0:
yumshell.cmdloop()
else:
yumshell.script()
del self._shell_history_cmds
return yumshell.result, yumshell.resultmsgs
def errorSummary(self, errstring):
"""Parse the error string for 'interesting' errors which can
be grouped, such as disk space issues.
:param errstring: the error string
:return: a string containing a summary of the errors
"""
summary = ''
# do disk space report first
p = re.compile('needs (\d+)(K|M)B on the (\S+) filesystem')
disk = {}
for m in p.finditer(errstring):
size_in_mb = int(m.group(1)) if m.group(2) == 'M' else round(int(m.group(1))/1024.0, 3)
if m.group(3) not in disk:
disk[m.group(3)] = size_in_mb
if disk[m.group(3)] < size_in_mb:
disk[m.group(3)] = size_in_mb
if disk:
summary += _('Disk Requirements:\n')
for k in disk:
summary += P_(' At least %dMB more space needed on the %s filesystem.\n', ' At least %dMB more space needed on the %s filesystem.\n', disk[k]) % (disk[k], k)
# TODO: simplify the dependency errors?
# Fixup the summary
summary = _('Error Summary\n-------------\n') + summary
return summary
def waitForLock(self):
"""Establish the yum lock. If another process is already
holding the yum lock, by default this method will keep trying
to establish the lock until it is successful. However, if
:attr:`self.conf.exit_on_lock` is set to True, it will
raise a :class:`Errors.YumBaseError`.
"""
lockerr = ""
while True:
try:
self.doLock()
except yum.Errors.LockError, e:
if exception2msg(e) != lockerr:
lockerr = exception2msg(e)
self.logger.critical(lockerr)
if e.errno:
raise yum.Errors.YumBaseError, _("Can't create lock file; exiting")
if not self.conf.exit_on_lock:
self.logger.critical("Another app is currently holding the yum lock; waiting for it to exit...")
import utils
utils.show_lock_owner(e.pid, self.logger)
time.sleep(2)
else:
raise yum.Errors.YumBaseError, _("Another app is currently holding the yum lock; exiting as configured by exit_on_lock")
else:
break
def doCommands(self):
"""Call the base command, and pass it the extended commands or
arguments.
:return: (exit_code, [ errors ])
exit_code is::
0 = we're done, exit
1 = we've errored, exit with error string
2 = we've got work yet to do, onto the next stage
"""
# at this point we know the args are valid - we don't know their meaning
# but we know we're not being sent garbage
# setup our transaction set if the command we're using needs it
# compat with odd modules not subclassing YumCommand
needTs = True
needTsRemove = False
cmd = self.yum_cli_commands[self.basecmd]
if hasattr(cmd, 'needTs'):
needTs = cmd.needTs(self, self.basecmd, self.extcmds)
if not needTs and hasattr(cmd, 'needTsRemove'):
needTsRemove = cmd.needTsRemove(self, self.basecmd, self.extcmds)
if needTs or needTsRemove:
try:
self._getTs(needTsRemove)
except yum.Errors.YumBaseError, e:
return 1, [exception2msg(e)]
# This should already have been done at doCheck() time, but just in
# case repos. got added or something do it again.
self._set_repos_cache_req(warning=False)
return self.yum_cli_commands[self.basecmd].doCommand(self, self.basecmd, self.extcmds)
def doTransaction(self, inhibit={'what' : 'shutdown:idle',
'who' : 'yum API',
'why' : 'Running transaction', # i18n?
'mode' : 'block'}):
"""Take care of package downloading, checking, user
confirmation and actually running the transaction.
:return: a numeric return code, and optionally a list of
errors. A negative return code indicates that errors
occurred in the pre-transaction checks
"""
def _downloadonly_userconfirm(self):
# Note that we shouldn't just remove the 'd' option, or the options
# yum accepts will be different which is bad. So always accept it,
# but change the prompt.
dl_only = {'downloadonly' :
(u'd', _('d'), _('download'),
_('downloadonly'))}
if not stuff_to_download:
ret = self.userconfirm(extra=dl_only)
if ret == 'downloadonly':
ret = None
return ret
return self.userconfirm(prompt=_('Is this ok [y/d/N]: '),
extra=dl_only)
# just make sure there's not, well, nothing to do
if len(self.tsInfo) == 0:
self.verbose_logger.info(_('Trying to run the transaction but nothing to do. Exiting.'))
return -1
# NOTE: In theory we can skip this in -q -y mode, for a slight perf.
# gain. But it's probably doom to have a different code path.
lsts = self.listTransaction()
if self.verbose_logger.isEnabledFor(yum.logginglevels.INFO_1):
self.verbose_logger.log(yum.logginglevels.INFO_1, lsts)
elif self.conf.assumeno or not self.conf.assumeyes:
# If we are in quiet, and assumeyes isn't on we want to output
# at least the transaction list anyway.
self.logger.warn(lsts)
# Check which packages have to be downloaded
downloadpkgs = []
rmpkgs = []
stuff_to_download = False
install_only = True
for txmbr in self.tsInfo.getMembers():
if txmbr.ts_state not in ('i', 'u'):
install_only = False
po = txmbr.po
if po:
rmpkgs.append(po)
else:
stuff_to_download = True
po = txmbr.po
if po:
downloadpkgs.append(po)
# Close the connection to the rpmdb so that rpm doesn't hold the SIGINT
# handler during the downloads. self.ts is reinitialised later in this
# function anyway (initActionTs).
self.ts.close()
# Report the total download size to the user, so he/she can base
# the answer on this info
if not stuff_to_download:
self.reportRemoveSize(rmpkgs)
else:
self.reportDownloadSize(downloadpkgs, install_only)
cfr = self.tsInfo._check_future_rpmdbv
if (cfr is not None and
self.tsInfo.futureRpmDBVersion() != cfr[1]):
msg = _("future rpmdb ver mismatched saved transaction version,")
if cfr[2]:
msg += _(" ignoring, as requested.")
self.logger.critical(_(msg))
else:
msg += _(" aborting.")
raise yum.Errors.YumBaseError(msg)
# confirm with user
if self._promptWanted():
uc = None
if not self.conf.assumeno:
uc = _downloadonly_userconfirm(self)
if not uc:
self.verbose_logger.info(_('Exiting on user command'))
return -1
elif uc == 'downloadonly':
self.conf.downloadonly = True
if self.conf.downloadonly:
self.verbose_logger.log(yum.logginglevels.INFO_2,
_('Background downloading packages, then exiting:'))
else:
self.verbose_logger.log(yum.logginglevels.INFO_2,
_('Downloading packages:'))
problems = self.downloadPkgs(downloadpkgs, callback_total=self.download_callback_total_cb)
if len(problems) > 0:
errstring = ''
errstring += _('Error downloading packages:\n')
for key in problems:
errors = yum.misc.unique(problems[key])
for error in errors:
errstring += ' %s: %s\n' % (key, error)
raise yum.Errors.YumBaseError, errstring
# Check GPG signatures
if self.gpgsigcheck(downloadpkgs) != 0:
return -1
self.initActionTs()
# save our dsCallback out
dscb = self.dsCallback
self.dsCallback = None # dumb, dumb dumb dumb!
self.populateTs(keepold=0) # sigh
rcd_st = time.time()
self.verbose_logger.log(yum.logginglevels.INFO_2,
_('Running transaction check'))
msgs = self._run_rpm_check()
depsolve = False
if msgs:
rpmlib_only = True
for msg in msgs:
if msg.startswith('rpmlib('):
continue
rpmlib_only = False
if rpmlib_only:
print _("ERROR You need to update rpm to handle:")
else:
print _('ERROR with transaction check vs depsolve:')
depsolve = True
for msg in msgs:
print to_utf8(msg)
if rpmlib_only:
return 1, [_('RPM needs to be updated')]
if depsolve:
return 1, []
else:
return 1, [_('Please report this error in %s') % self.conf.bugtracker_url]
self.verbose_logger.debug('Transaction check time: %0.3f' % (time.time() - rcd_st))
tt_st = time.time()
self.verbose_logger.log(yum.logginglevels.INFO_2,
_('Running transaction test'))
self.ts.order() # order the transaction
self.ts.clean() # release memory not needed beyond this point
testcb = RPMTransaction(self, test=True)
tserrors = self.ts.test(testcb)
del testcb
if len(tserrors) > 0:
errstring = _('Transaction check error:\n')
for descr in tserrors:
errstring += ' %s\n' % to_unicode(descr)
raise yum.Errors.YumBaseError, errstring + '\n' + \
self.errorSummary(errstring)
self.verbose_logger.log(yum.logginglevels.INFO_2,
_('Transaction test succeeded'))
self.verbose_logger.debug('Transaction test time: %0.3f' % (time.time() - tt_st))
# unset the sigquit handler
signal.signal(signal.SIGQUIT, signal.SIG_DFL)
ts_st = time.time()
# Reinstalls broke in: 7115478c527415cb3c8317456cdf50024de89a94 ...
# I assume there's a "better" fix, but this fixes reinstalls and lets
# other options continue as is (and they seem to work).
have_reinstalls = False
for txmbr in self.tsInfo.getMembers():
if txmbr.reinstall:
have_reinstalls = True
break
if have_reinstalls:
self.initActionTs() # make a new, blank ts to populate
self.populateTs(keepold=0) # populate the ts
self.ts.check() #required for ordering
self.ts.order() # order
self.ts.clean() # release memory not needed beyond this point
# put back our depcheck callback
self.dsCallback = dscb
# setup our rpm ts callback
cb = RPMTransaction(self,
display=output.YumCliRPMCallBack(weakref(self)))
if self.conf.debuglevel < 2:
cb.display.output = False
inhibited = False
if inhibit:
fd = sys_inhibit(inhibit['what'], inhibit['who'],
inhibit['why'], inhibit['mode'])
if fd is not None:
msg = _('Running transaction (shutdown inhibited)')
inhibited = True
if not inhibited:
msg = _('Running transaction')
self.verbose_logger.log(yum.logginglevels.INFO_2, msg)
resultobject = self.runTransaction(cb=cb)
# fd is either None or a dbus.UnixFd() and the real API to close it is thus:
# if fd is not None: os.close(fd.take())
# ...but this is easier, doesn't require a test and works.
del fd
self.verbose_logger.debug('Transaction time: %0.3f' % (time.time() - ts_st))
# close things
self.verbose_logger.log(yum.logginglevels.INFO_1,
self.postTransactionOutput())
# put back the sigquit handler
signal.signal(signal.SIGQUIT, sigquit)
return resultobject.return_code
def gpgsigcheck(self, pkgs):
"""Perform GPG signature verification on the given packages,
installing keys if possible.
:param pkgs: a list of package objects to verify the GPG
signatures of
:return: non-zero if execution should stop due to an error
:raises: Will raise :class:`YumBaseError` if there's a problem
"""
for po in pkgs:
result, errmsg = self.sigCheckPkg(po)
if result == 0:
# Verified ok, or verify not req'd
continue
elif result == 1:
ay = self.conf.assumeyes and not self.conf.assumeno
if not sys.stdin.isatty() and not ay:
raise yum.Errors.YumBaseError, \
_('Refusing to automatically import keys when running ' \
'unattended.\nUse "-y" to override.')
# the callback here expects to be able to take options which
# userconfirm really doesn't... so fake it
self.getKeyForPackage(po, lambda x, y, z: self.userconfirm())
else:
# Fatal error
raise yum.Errors.YumBaseError, errmsg
return 0
def _maybeYouMeant(self, arg):
""" If install argument doesn't match with case, tell the user. """
matches = self.doPackageLists(patterns=[arg], ignore_case=True)
matches = matches.installed + matches.available
matches = set(map(lambda x: x.name, matches))
if matches:
msg = self.fmtKeyValFill(_(' * Maybe you meant: '),
", ".join(matches))
self.verbose_logger.log(yum.logginglevels.INFO_2, to_unicode(msg))
def _checkMaybeYouMeant(self, arg, always_output=True, rpmdb_only=False):
""" If the update/remove argument doesn't match with case, or due
to not being installed, tell the user. """
# always_output is a wart due to update/remove not producing the
# same output.
# if it is a grouppattern then none of this is going to make any sense
# skip it.
if not arg or arg[0] == '@':
return
pkgnarrow='all'
if rpmdb_only:
pkgnarrow='installed'
matches = self.doPackageLists(pkgnarrow=pkgnarrow, patterns=[arg], ignore_case=False)
if (matches.installed or (not matches.available and
self.returnInstalledPackagesByDep(arg))):
return # Found a match so ignore
hibeg = self.term.MODE['bold']
hiend = self.term.MODE['normal']
if matches.available:
self.verbose_logger.log(yum.logginglevels.INFO_2,
_('Package(s) %s%s%s available, but not installed.'),
hibeg, arg, hiend)
return
# No package name, so do the maybeYouMeant thing here too
matches = self.doPackageLists(pkgnarrow=pkgnarrow, patterns=[arg], ignore_case=True)
if not matches.installed and matches.available:
self.verbose_logger.log(yum.logginglevels.INFO_2,
_('Package(s) %s%s%s available, but not installed.'),
hibeg, arg, hiend)
return
matches = set(map(lambda x: x.name, matches.installed))
if always_output or matches:
self.verbose_logger.log(yum.logginglevels.INFO_2,
_('No package %s%s%s available.'),
hibeg, arg, hiend)
if matches:
msg = self.fmtKeyValFill(_(' * Maybe you meant: '),
", ".join(matches))
self.verbose_logger.log(yum.logginglevels.INFO_2, msg)
def _install_upgraded_requires(self, txmbrs):
"""Go through the given txmbrs, and for any to be installed packages
look for their installed deps. and try to upgrade them, if the
configuration is set. Returning any new transaction members to be
isntalled.
:param txmbrs: a list of :class:`yum.transactioninfo.TransactionMember` objects
:return: a list of :class:`yum.transactioninfo.TransactionMember` objects
"""
if not self.conf.upgrade_requirements_on_install:
return []
ret = []
done = set()
def _pkg2ups(pkg, reqpo=None):
if pkg.name in done:
return []
if reqpo is None:
reqpo = pkg
done.add(pkg.name)
uret = []
for req in pkg.requires:
for npkg in self.returnInstalledPackagesByDep(req):
if npkg.name in done:
continue
uret += self.update(name=npkg.name, requiringPo=reqpo)
uret += _pkg2ups(npkg, reqpo=reqpo)
return uret
for txmbr in txmbrs:
for rtxmbr, T in txmbr.relatedto:
ret += _pkg2ups(rtxmbr)
ret += _pkg2ups(txmbr.po)
return ret
def installPkgs(self, userlist, basecmd='install', repoid=None):
"""Attempt to take the user specified list of packages or
wildcards and install them, or if they are installed, update
them to a newer version. If a complete version number is
specified, attempt to upgrade (or downgrade if they have been
removed) them to the specified version.
:param userlist: a list of names or wildcards specifying
packages to install
:return: (exit_code, [ errors ])
exit_code is::
0 = we're done, exit
1 = we've errored, exit with error string
2 = we've got work yet to do, onto the next stage
"""
# get the list of available packages
# iterate over the user's list
# add packages to Transaction holding class if they match.
# if we've added any packages to the transaction then return 2 and a string
# if we've hit a snag, return 1 and the failure explanation
# if we've got nothing to do, return 0 and a 'nothing available to install' string
oldcount = len(self.tsInfo)
done = False
for arg in userlist:
if (arg.endswith('.rpm') and (yum.misc.re_remote_url(arg) or
os.path.exists(arg))):
txmbrs = self.installLocal(arg)
self._install_upgraded_requires(txmbrs)
continue # it was something on disk and it ended in rpm
# no matter what we don't go looking at repos
try:
if False: pass
elif basecmd == 'install-n':
txmbrs = self.install(name=arg)
elif basecmd == 'install-na':
try:
n,a = arg.rsplit('.', 1)
except:
self.verbose_logger.warning(_('Bad %s argument %s.'),
basecmd, arg)
continue
txmbrs = self.install(name=n, arch=a)
elif basecmd == 'install-nevra':
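# Illustrative only: an 'install-nevra' argument such as
#   bash-0:4.2.46-30.el7.x86_64
# splits below into n='bash', e='0', v='4.2.46', r='30.el7', a='x86_64'.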
try:
nevr,a = arg.rsplit('.', 1)
n,ev,r = nevr.rsplit('-', 2)
e,v = ev.split(':', 1)
except:
self.verbose_logger.warning(_('Bad %s argument %s.'),
basecmd, arg)
continue
txmbrs = self.install(name=n,
epoch=e, version=v, release=r, arch=a)
else:
assert basecmd == 'install', basecmd
txmbrs = self.install(pattern=arg)
except yum.Errors.GroupInstallError, e:
self.verbose_logger.log(yum.logginglevels.INFO_2, e)
except yum.Errors.InstallError:
self.verbose_logger.log(yum.logginglevels.INFO_2,
_('No package %s%s%s available.'),
self.term.MODE['bold'], arg,
self.term.MODE['normal'])
self._maybeYouMeant(arg)
else:
done = True
self._install_upgraded_requires(txmbrs)
if len(self.tsInfo) > oldcount:
change = len(self.tsInfo) - oldcount
return 2, [P_('%d package to install', '%d packages to install', change) % change]
if not done:
return 1, [_('Nothing to do')]
return 0, [_('Nothing to do')]
def updatePkgs(self, userlist, quiet=0, update_to=False):
"""Take user commands and populate transaction wrapper with
packages to be updated.
:param userlist: a list of names or wildcards specifying
packages to update. If *userlist* is an empty list, yum
will perform a global update
:param quiet: unused
:param update_to: if *update_to* is True, the update will only
be run if it will update the given package to the given
version. For example, if the package foo-1-2 is installed,
updatePkgs(["foo-1-2], update_to=False) will work
identically to updatePkgs(["foo"]), but
updatePkgs(["foo-1-2"], update_to=True) will do nothing
:return: (exit_code, [ errors ])
exit_code is::
0 = we're done, exit
1 = we've errored, exit with error string
2 = we've got work yet to do, onto the next stage
"""
# if there is no userlist, then do global update below
# this is probably 90% of the calls
# if there is a userlist then it's for updating pkgs, not obsoleting
oldcount = len(self.tsInfo)
if len(userlist) == 0: # simple case - do them all
self.update()
else:
# go through the userlist - look for items that are local rpms. If we find them
# pass them off to installLocal() and then move on
for item in userlist:
if (item.endswith('.rpm') and (yum.misc.re_remote_url(item) or
os.path.exists(item))):
txmbrs = self.installLocal(item, updateonly=1)
self._install_upgraded_requires(txmbrs)
continue
txmbrs = self.update(pattern=item, update_to=update_to)
self._install_upgraded_requires(txmbrs)
if not txmbrs:
self._checkMaybeYouMeant(item)
if len(self.tsInfo) > oldcount:
change = len(self.tsInfo) - oldcount
return 2, [P_('%d package marked for update', '%d packages marked for update', change) % change]
else:
return 0, [_('No packages marked for update')]
# Note that we aren't in __init__ yet for a couple of reasons, but we
# probably will get there for 3.2.28.
def distroSyncPkgs(self, userlist):
"""Upgrade or downgrade packages to match the latest versions
available in the enabled repositories.
:param userlist: list of names or wildcards specifying
packages to synchronize with the repositories. If the
first string in *userlist* is "full", packages will also be
reinstalled if their checksums do not match the checksums
in the repositories. If *userlist* is an empty list or
only contains "full", every installed package will be
synchronized
:return: (exit_code, [ errors ])
exit_code is::
0 = we're done, exit
1 = we've errored, exit with error string
2 = we've got work yet to do, onto the next stage
"""
level = 'diff'
if userlist and userlist[0] in ('full', 'diff', 'different'):
level = userlist[0]
userlist = userlist[1:]
if level == 'different':
level = 'diff'
dupdates = []
ipkgs = {}
for pkg in sorted(self.rpmdb.returnPackages(patterns=userlist)):
ipkgs[pkg.name] = pkg
obsoletes = []
if self.conf.obsoletes:
obsoletes = self.up.getObsoletesTuples(newest=1)
for (obsoleting, installed) in obsoletes:
if installed[0] not in ipkgs:
continue
dupdates.extend(self.update(pkgtup=installed))
for (obsoleting, installed) in obsoletes:
if installed[0] not in ipkgs:
continue
del ipkgs[installed[0]]
apkgs = {}
pkgs = []
if ipkgs:
try:
pkgs = self.pkgSack.returnNewestByName(patterns=ipkgs.keys())
except yum.Errors.PackageSackError:
pkgs = []
for pkg in pkgs:
if pkg.name not in ipkgs:
continue
apkgs[pkg.name] = pkg
for ipkgname in ipkgs:
if ipkgname not in apkgs:
continue
ipkg = ipkgs[ipkgname]
apkg = apkgs[ipkgname]
if ipkg.verEQ(apkg): # Latest installed == Latest avail.
if level == 'diff':
continue
# level == full: do reinstalls if checksum doesn't match.
# do removals, if older installed versions.
for napkg in self.rpmdb.searchNames([ipkgname]):
if (not self.allowedMultipleInstalls(apkg) and
not napkg.verEQ(ipkg)):
dupdates.extend(self.remove(po=napkg))
continue
nayi = napkg.yumdb_info
found = False
for apkg in self.pkgSack.searchPkgTuple(napkg.pkgtup):
if ('checksum_type' in nayi and
'checksum_data' in nayi and
nayi.checksum_type == apkg.checksum_type and
nayi.checksum_data == apkg.pkgId):
found = True
break
if found:
continue
dupdates.extend(self.reinstall(pkgtup=napkg.pkgtup))
continue
if self.allowedMultipleInstalls(apkg):
found = False
for napkg in self.rpmdb.searchNames([apkg.name]):
if napkg.verEQ(apkg):
found = True
elif napkg.verGT(apkg):
dupdates.extend(self.remove(po=napkg))
if found:
continue
dupdates.extend(self.install(pattern=apkg.name))
elif ipkg.verLT(apkg):
n,a,e,v,r = apkg.pkgtup
dupdates.extend(self.update(name=n, epoch=e, ver=v, rel=r))
else:
n,a,e,v,r = apkg.pkgtup
dupdates.extend(self.downgrade(name=n, epoch=e, ver=v, rel=r))
if dupdates:
return 2, [P_('%d package marked for distribution synchronization', '%d packages marked for distribution synchronization', len(dupdates)) % len(dupdates)]
else:
return 0, [_('No packages marked for distribution synchronization')]
def erasePkgs(self, userlist, pos=False, basecmd='remove'):
"""Take user commands and populate a transaction wrapper with
packages to be erased.
:param userlist: a list of names or wildcards specifying
packages to erase
:return: (exit_code, [ errors ])
exit_code is::
0 = we're done, exit
1 = we've errored, exit with error string
2 = we've got work yet to do, onto the next stage
"""
all_rms = []
for arg in userlist:
if pos:
rms = self.remove(po=arg)
if rms:
all_rms.extend(rms)
continue
if False: pass
elif basecmd in ('erase-n', 'remove-n'):
rms = self.remove(name=arg)
elif basecmd in ('erase-na', 'remove-na'):
try:
n,a = arg.rsplit('.', 1)
except:
self.verbose_logger.warning(_('Bad %s argument %s.'),
basecmd, arg)
continue
rms = self.remove(name=n, arch=a)
elif basecmd in ('erase-nevra', 'remove-nevra'):
try:
nevr,a = arg.rsplit('.', 1)
n,ev,r = nevr.rsplit('-', 2)
e,v = ev.split(':', 1)
except:
self.verbose_logger.warning(_('Bad %s argument %s.'),
basecmd, arg)
continue
rms = self.remove(name=n, epoch=e, version=v, release=r, arch=a)
else:
assert basecmd in ('erase', 'remove'), basecmd
rms = self.remove(pattern=arg)
if not rms:
self._checkMaybeYouMeant(arg, always_output=False, rpmdb_only=True)
all_rms.extend(rms)
if all_rms:
return 2, [P_('%d package marked for removal', '%d packages marked for removal', len(all_rms)) % len(all_rms)]
else:
return 0, [_('No Packages marked for removal')]
def downgradePkgs(self, userlist):
"""Attempt to take the user specified list of packages or
wildcards and downgrade them. If a complete version number is
specified, attempt to downgrade them to the specified version.
:param userlist: a list of names or wildcards specifying
packages to downgrade
:return: (exit_code, [ errors ])
exit_code is::
0 = we're done, exit
1 = we've errored, exit with error string
2 = we've got work yet to do, onto the next stage
"""
oldcount = len(self.tsInfo)
done = False
for arg in userlist:
if (arg.endswith('.rpm') and (yum.misc.re_remote_url(arg) or
os.path.exists(arg))):
self.downgradeLocal(arg)
continue # it was something on disk and it ended in rpm
# no matter what we don't go looking at repos
try:
self.downgrade(pattern=arg)
except yum.Errors.DowngradeError:
self.verbose_logger.log(yum.logginglevels.INFO_2,
_('No package %s%s%s available.'),
self.term.MODE['bold'], arg,
self.term.MODE['normal'])
self._maybeYouMeant(arg)
else:
done = True
if len(self.tsInfo) > oldcount:
change = len(self.tsInfo) - oldcount
return 2, [P_('%d package to downgrade', '%d packages to downgrade', change) % change]
if not done:
return 1, [_('Nothing to do')]
return 0, [_('Nothing to do')]
def reinstallPkgs(self, userlist):
"""Attempt to take the user specified list of packages or
wildcards and reinstall them.
:param userlist: a list of names or wildcards specifying
packages to reinstall
:return: (exit_code, [ errors ])
exit_code is::
0 = we're done, exit
1 = we've errored, exit with error string
2 = we've got work yet to do, onto the next stage
"""
oldcount = len(self.tsInfo)
done = False
for arg in userlist:
if (arg.endswith('.rpm') and (yum.misc.re_remote_url(arg) or
os.path.exists(arg))):
txmbrs = self.reinstallLocal(arg)
self._install_upgraded_requires(txmbrs)
continue # it was something on disk and it ended in rpm
# no matter what we don't go looking at repos
try:
txmbrs = self.reinstall(pattern=arg)
except yum.Errors.ReinstallRemoveError:
self._checkMaybeYouMeant(arg, always_output=False)
except yum.Errors.ReinstallInstallError, e:
for ipkg in e.failed_pkgs:
xmsg = ''
if 'from_repo' in ipkg.yumdb_info:
xmsg = ipkg.yumdb_info.from_repo
xmsg = _(' (from %s)') % xmsg
msg = _('Installed package %s%s%s%s not available.')
self.verbose_logger.log(yum.logginglevels.INFO_2, msg,
self.term.MODE['bold'], ipkg,
self.term.MODE['normal'], xmsg)
except yum.Errors.ReinstallError, e:
assert False, "Shouldn't happen, but just in case"
self.verbose_logger.log(yum.logginglevels.INFO_2, e)
else:
done = True
self._install_upgraded_requires(txmbrs)
if len(self.tsInfo) > oldcount:
change = len(self.tsInfo) - oldcount
return 2, [P_('%d package to reinstall', '%d packages to reinstall', change) % change]
if not done:
return 1, [_('Nothing to do')]
return 0, [_('Nothing to do')]
def localInstall(self, filelist, updateonly=0):
"""Install or update rpms provided on the file system in a
local directory (i.e. not from a repository).
:param filelist: a list of names specifying local rpms
:return: (exit_code, [ errors ])
exit_code is::
0 = we're done, exit
1 = we've errored, exit with error string
2 = we've got work yet to do, onto the next stage
"""
# read in each package into a YumLocalPackage Object
# append it to self.localPackages
# check if it can be installed or updated based on nevra versus rpmdb
# don't import the repos until we absolutely need them for depsolving
if len(filelist) == 0:
return 0, [_('No packages provided')]
installing = False
for pkg in filelist:
if not pkg.endswith('.rpm'):
self.verbose_logger.log(yum.logginglevels.INFO_2,
"Skipping: %s, filename does not end in .rpm.", pkg)
continue
txmbrs = self.installLocal(pkg, updateonly=updateonly)
if txmbrs:
installing = True
if installing:
return 2, [_('Package(s) to install')]
return 0, [_('Nothing to do')]
def returnPkgLists(self, extcmds, installed_available=False, repoid=None):
"""Return a :class:`yum.misc.GenericHolder` object containing
lists of package objects that match the given names or wildcards.
:param extcmds: a list of names or wildcards specifying
packages to list
:param installed_available: whether the available package list
is present as .hidden_available when doing all, available,
or installed
:param repoid: a repoid that all packages should belong to
:return: a :class:`yum.misc.GenericHolder` instance with the
following lists defined::
available = list of packageObjects
installed = list of packageObjects
updates = tuples of packageObjects (updating, installed)
extras = list of packageObjects
obsoletes = tuples of packageObjects (obsoleting, installed)
recent = list of packageObjects
"""
special = ['available', 'installed', 'all', 'extras', 'updates', 'recent',
'obsoletes', 'distro-extras']
pkgnarrow = 'all'
done_hidden_available = False
done_hidden_installed = False
if len(extcmds) > 0:
if installed_available and extcmds[0] == 'installed':
done_hidden_available = True
extcmds.pop(0)
elif installed_available and extcmds[0] == 'available':
done_hidden_installed = True
extcmds.pop(0)
elif extcmds[0] in special:
pkgnarrow = extcmds.pop(0)
ypl = self.doPackageLists(pkgnarrow=pkgnarrow, patterns=extcmds,
ignore_case=True, repoid=repoid)
if self.conf.showdupesfromrepos:
ypl.available += ypl.reinstall_available
if installed_available:
ypl.hidden_available = ypl.available
ypl.hidden_installed = ypl.installed
if done_hidden_available:
ypl.available = []
if done_hidden_installed:
ypl.installed = []
return ypl
def search(self, args):
"""Search for simple text tags in a package object. This is a
cli wrapper method for the module search function.
:param args: list of names or wildcards to search for.
Normally this method will begin by searching the package
names and summaries, and will only search urls and
descriptions if that fails. However, if the first string
in *args* is "all", this method will always search
everything
:return: a tuple where the first item is an exit code, and
the second item is a generator if the search is a
successful, and a list of error messages otherwise
exit_code is::
0 = we're done, exit
1 = we've errored, exit with error string
2 = we've got work yet to do, onto the next stage
"""
# call the yum module search function with lists of tags to search
# and what to search for
# display the list of matches
searchlist = ['name', 'summary', 'description', 'url']
dups = self.conf.showdupesfromrepos
args = map(to_unicode, args)
okeys = set()
akeys = set() # All keys, used to see if nothing matched
mkeys = set() # "Main" set of keys for N/S search (biggest term. hit).
pos = set()
def _print_match_section(text):
# Print them in the order they were passed
used_keys = [arg for arg in args if arg in keys]
print self.fmtSection(text % ", ".join(used_keys))
# First try just the name/summary fields, and if we get any hits
# don't do the other stuff. Unless the user overrides via. "all".
if len(args) > 1 and args[0] == 'all':
args.pop(0)
else:
matching = self.searchGenerator(['name', 'summary'], args,
showdups=dups, keys=True)
for (po, keys, matched_value) in matching:
if keys != okeys:
if akeys:
if len(mkeys) == len(args):
break
print ""
else:
mkeys = set(keys)
_print_match_section(_('N/S matched: %s'))
okeys = keys
pos.add(po)
akeys.update(keys)
self.matchcallback(po, matched_value, args)
matching = self.searchGenerator(searchlist, args,
showdups=dups, keys=True)
okeys = set()
# If we got a hit with just name/summary then we only care about hits
# with _more_ search terms. Thus. if we hit all our search terms. do
# nothing.
if len(mkeys) == len(args):
print ""
if len(args) == 1:
msg = _(' Name and summary matches %sonly%s, use "search all" for everything.')
else:
msg = _(' Full name and summary matches %sonly%s, use "search all" for everything.')
print msg % (self.term.MODE['bold'], self.term.MODE['normal'])
matching = []
for (po, keys, matched_value) in matching:
# Don't print matches for "a", "b", "c" on N+S+D when we already
# matched that on just N+S.
if len(keys) <= len(mkeys):
continue
# Just print the highest level of full matches, when we did
# minimal matches. Ie. "A", "B" match N+S, just print the
# "A", "B", "C", "D" full match, and not the "B", "C", "D" matches.
if mkeys and len(keys) < len(okeys):
continue
if keys != okeys:
if akeys:
print ""
_print_match_section(_('Matched: %s'))
okeys = keys
akeys.update(keys)
self.matchcallback(po, matched_value, args)
if mkeys and len(mkeys) != len(args):
print ""
print _(' Name and summary matches %smostly%s, use "search all" for everything.') % (self.term.MODE['bold'], self.term.MODE['normal'])
for arg in args:
if arg not in akeys:
self.logger.warning(_('Warning: No matches found for: %s'), arg)
if not akeys:
return 0, [_('No matches found')]
return 0, matching
def deplist(self, args):
"""Print out a formatted list of dependencies for a list of
packages. This is a cli wrapper method for
:class:`yum.YumBase.findDeps`.
:param args: a list of names or wildcards specifying packages
that should have their dependenices printed
:return: (exit_code, [ errors ])
exit_code is::
0 = we're done, exit
1 = we've errored, exit with error string
2 = we've got work yet to do, onto the next stage
"""
pkgs = []
for arg in args:
if (arg.endswith('.rpm') and (yum.misc.re_remote_url(arg) or
os.path.exists(arg))):
thispkg = yum.packages.YumUrlPackage(self, self.ts, arg)
pkgs.append(thispkg)
elif self.conf.showdupesfromrepos:
pkgs.extend(self.pkgSack.returnPackages(patterns=[arg],
ignore_case=True))
else:
try:
pkgs.extend(self.pkgSack.returnNewestByName(patterns=[arg],
ignore_case=True))
except yum.Errors.PackageSackError:
pass
results = self.findDeps(pkgs)
self.depListOutput(results)
return 0, []
def provides(self, args):
"""Print out a list of packages that provide the given file or
feature. This a cli wrapper to the provides methods in the
rpmdb and pkgsack.
:param args: the name of a file or feature to search for
:return: (exit_code, [ errors ])
exit_code is::
0 = we're done, exit
1 = we've errored, exit with error string
2 = we've got work yet to do, onto the next stage
"""
old_sdup = self.conf.showdupesfromrepos
# For output, as searchPackageProvides() is always in showdups mode
self.conf.showdupesfromrepos = True
cb = self.matchcallback_verbose
matching = self.searchPackageProvides(args, callback=cb,
callback_has_matchfor=True)
if len(matching) == 0:
# Try to be a bit clever, for commands, and python modules.
# Maybe want something so we can do perl/etc. too?
paths = set(sys.path + os.environ['PATH'].split(':'))
nargs = []
for arg in args:
if not arg:
continue
if yum.misc.re_filename(arg) or yum.misc.re_glob(arg):
continue
for path in paths:
if not path:
continue
nargs.append("%s/%s" % (path, arg))
matching = self.searchPackageProvides(nargs, callback=cb,
callback_has_matchfor=True)
self.conf.showdupesfromrepos = old_sdup
if len(matching) == 0:
return 0, ['No matches found']
return 0, []
def resolveDepCli(self, args):
"""Print information about a package that provides the given
dependency. Only one package will be printed per dependency.
:param args: a list of strings specifying dependencies to
search for
:return: (exit_code, [ errors ])
exit_code is::
0 = we're done, exit
1 = we've errored, exit with error string
2 = we've got work yet to do, onto the next stage
"""
not_found = set()
for arg in args:
try:
ipkg = self.returnInstalledPackageByDep(arg)
except yum.Errors.YumBaseError:
ipkg = None
else:
self.verbose_logger.info(" %s:", arg)
self.verbose_logger.info("%s %s" % (ipkg.envra,
ipkg.ui_from_repo))
try:
pkg = self.returnPackageByDep(arg)
except yum.Errors.YumBaseError:
if not ipkg:
not_found.add(arg)
else:
if not ipkg:
self.verbose_logger.info(" %s:", arg)
if not pkg.verEQ(ipkg):
self.verbose_logger.info("%s %s" % (pkg.envra,
pkg.ui_from_repo))
if not_found:
self.logger.critical(_('Error: No packages found for:\n %s'),
"\n ".join(sorted(not_found)))
return 0, []
def cleanCli(self, userlist):
"""Remove data from the yum cache directory. What data is
removed depends on the options supplied by the user.
:param userlist: a list of options. The following are valid
options::
expire-cache = Eliminate the local data saying when the
metadata and mirror lists were downloaded for each
repository.
packages = Eliminate any cached packages
headers = Eliminate the header files, which old versions
of yum used for dependency resolution
metadata = Eliminate all of the files which yum uses to
determine the remote availability of packages
dbcache = Eliminate the sqlite cache used for faster
access to metadata
           rpmdb = Eliminate any cached data from the local rpmdb
plugins = Tell any enabled plugins to eliminate their
cached data
all = do all of the above
:return: (exit_code, [ errors ])
exit_code is::
0 = we're done, exit
1 = we've errored, exit with error string
2 = we've got work yet to do, onto the next stage
"""
hdrcode = pkgcode = xmlcode = dbcode = expccode = 0
pkgresults = hdrresults = xmlresults = dbresults = expcresults = []
msg = self.fmtKeyValFill(_('Cleaning repos: '),
' '.join([ x.id for x in self.repos.listEnabled()]))
self.verbose_logger.log(yum.logginglevels.INFO_2, msg)
if 'all' in userlist:
self.verbose_logger.log(yum.logginglevels.INFO_2,
_('Cleaning up everything'))
pkgcode, pkgresults = self.cleanPackages()
hdrcode, hdrresults = self.cleanHeaders()
xmlcode, xmlresults = self.cleanMetadata()
dbcode, dbresults = self.cleanSqlite()
rpmcode, rpmresults = self.cleanRpmDB()
self.plugins.run('clean')
code = hdrcode + pkgcode + xmlcode + dbcode + rpmcode
results = (hdrresults + pkgresults + xmlresults + dbresults +
rpmresults)
for msg in results:
self.logger.debug(msg)
return code, []
if 'headers' in userlist:
self.logger.debug(_('Cleaning up headers'))
hdrcode, hdrresults = self.cleanHeaders()
if 'packages' in userlist:
self.logger.debug(_('Cleaning up packages'))
pkgcode, pkgresults = self.cleanPackages()
if 'metadata' in userlist:
self.logger.debug(_('Cleaning up xml metadata'))
xmlcode, xmlresults = self.cleanMetadata()
if 'dbcache' in userlist or 'metadata' in userlist:
self.logger.debug(_('Cleaning up database cache'))
dbcode, dbresults = self.cleanSqlite()
if 'expire-cache' in userlist or 'metadata' in userlist:
self.logger.debug(_('Cleaning up expire-cache metadata'))
expccode, expcresults = self.cleanExpireCache()
if 'rpmdb' in userlist:
self.logger.debug(_('Cleaning up cached rpmdb data'))
expccode, expcresults = self.cleanRpmDB()
if 'plugins' in userlist:
self.logger.debug(_('Cleaning up plugins'))
self.plugins.run('clean')
code = hdrcode + pkgcode + xmlcode + dbcode + expccode
results = hdrresults + pkgresults + xmlresults + dbresults + expcresults
for msg in results:
self.verbose_logger.log(yum.logginglevels.INFO_2, msg)
return code, []
def returnGroupLists(self, userlist):
"""Print out a list of groups that match the given names or
wildcards.
        :param userlist: a list of names or wildcards specifying
groups to list
:return: (exit_code, [ errors ])
exit_code is::
0 = we're done, exit
1 = we've errored, exit with error string
2 = we've got work yet to do, onto the next stage
"""
return self._returnGroupLists(userlist)
def _returnGroupLists(self, userlist, summary=False):
# What data are we showing...
wts_map = {'hidden' : 'hidden',
'language' : 'lang',
'languages' : 'lang',
'lang' : 'lang',
'langs' : 'lang',
'environment' : 'env',
'environments' : 'env',
'env' : 'env',
'envs' : 'env',
'package' : 'pkg',
'packages' : 'pkg',
'pkg' : 'pkg',
'pkgs' : 'pkg',
'available' : 'avail',
'avail' : 'avail',
'installed' : 'inst',
'inst' : 'inst',
'id' : 'id',
'ids' : 'id',
}
verb = self.verbose_logger.isEnabledFor(yum.logginglevels.DEBUG_3)
wts = {'hidden' : False,
'lang' : None,
'env' : None,
'pkg' : None,
'inst' : None,
'avail' : None,
'id' : verb}
ouserlist = userlist[:]
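        # Sketch of the filter-word handling below (illustrative): given
        # userlist == ['installed', 'nolang', 'base*'], the loop consumes
        # 'installed' (wts['inst'] = True) and 'nolang' (wts['lang'] = False),
        # then stops at 'base*', which is not a filter word and stays in
        # userlist as a group pattern.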
while userlist:
arg = userlist[0]
val = True
if arg.startswith('no'):
arg = arg[2:]
val = False
if arg not in wts_map:
break
wts[wts_map[arg]] = val
userlist.pop(0)
if not userlist:
userlist = None # Match everything...
if wts['inst'] is None and wts['avail'] is None:
wts['inst'] = True
wts['avail'] = True
if wts['lang'] is None and wts['pkg'] is None and wts['env'] is None:
wts['env'] = True
wts['pkg'] = True
uv = not wts['hidden']
dGL = self.doGroupLists(patterns=userlist,
uservisible=uv, return_evgrps=True)
installed, available, ievgrps, evgrps = dGL
if not wts['env']:
ievgrps = []
evgrps = []
if not wts['inst']:
installed = []
ievgrps = []
if not wts['avail']:
available = []
evgrps = []
done = []
def _out_grp(sect, groups):
if not groups:
return
done.append(sect)
if summary:
self.verbose_logger.log(yum.logginglevels.INFO_2,
"%s %u", sect, len(groups))
return
self.verbose_logger.log(yum.logginglevels.INFO_2, sect)
for group in groups:
msg = ' %s' % group.ui_name
if wts['id']:
msg += ' (%s)' % group.compsid
if group.langonly:
msg += ' [%s]' % group.langonly
self.verbose_logger.info('%s', msg)
_out_grp(_('Installed environment groups:'), ievgrps)
_out_grp(_('Available environment groups:'), evgrps)
groups = []
for group in installed:
if group.langonly: continue
if not wts['pkg']: continue
groups.append(group)
_out_grp(_('Installed groups:'), groups)
groups = []
for group in installed:
if not group.langonly: continue
if not wts['lang']: continue
groups.append(group)
_out_grp(_('Installed language groups:'), groups)
groups = []
for group in available:
if group.langonly: continue
if not wts['pkg']: continue
groups.append(group)
_out_grp(_('Available Groups:'), groups)
groups = []
for group in available:
if not group.langonly: continue
if not wts['lang']: continue
groups.append(group)
_out_grp(_('Available language groups:'), groups)
if not done:
self.logger.error(_('Warning: no environments/groups match: %s'),
", ".join(ouserlist))
return 0, []
return 0, [_('Done')]
def returnGroupSummary(self, userlist):
"""Print a summary of the groups that match the given names or
wildcards.
:param userlist: a list of names or wildcards specifying the
           groups to summarise. If *userlist* is an empty list, all
           installed and available groups will be summarised
:return: (exit_code, [ errors ])
exit_code is::
0 = we're done, exit
1 = we've errored, exit with error string
2 = we've got work yet to do, onto the next stage
"""
return self._returnGroupLists(userlist, summary=True)
def returnGroupInfo(self, userlist):
"""Print complete information about the groups that match the
given names or wildcards.
:param userlist: a list of names or wildcards specifying the
groups to print information about
:return: (exit_code, [ errors ])
exit_code is::
0 = we're done, exit
1 = we've errored, exit with error string
2 = we've got work yet to do, onto the next stage
"""
for strng in userlist:
group_matched = False
pkg_grp = True
grp_grp = True
if strng.startswith('@^'):
strng = strng[2:]
pkg_grp = False
elif strng.startswith('@'):
strng = strng[1:]
grp_grp = False
if grp_grp:
for evgroup in self.comps.return_environments(strng):
self.displayGrpsInEnvironments(evgroup)
group_matched = True
if pkg_grp:
for group in self.comps.return_groups(strng):
self.displayPkgsInGroups(group)
group_matched = True
if not group_matched:
self.logger.error(_('Warning: group/environment %s does not exist.'), strng)
return 0, []
def installGroups(self, grouplist, upgrade=False):
"""Mark the packages in the given groups for installation.
:param grouplist: a list of names or wildcards specifying
groups to be installed
:return: (exit_code, [ errors ])
exit_code is::
0 = we're done, exit
1 = we've errored, exit with error string
2 = we've got work yet to do, onto the next stage
"""
pkgs_used = []
if not grouplist and self.conf.group_command == 'objects':
# Do what "yum upgrade" does when upgrade_group_objects_upgrade is
# set.
for ievgrp in self.igroups.environments:
pkgs_used.extend(self._at_groupupgrade('@^' + ievgrp))
for igrp in self.igroups.groups:
pkgs_used.extend(self._at_groupupgrade('@' + igrp))
done = False
for group_string in grouplist:
grp_grp = True
pkg_grp = True
if group_string.startswith('@^'):
pkg_grp = False
group_string = group_string[2:]
elif group_string.startswith('@'):
grp_grp = False
group_string = group_string[1:]
group_matched = False
groups = []
if grp_grp:
groups = self.comps.return_environments(group_string)
for group in groups:
group_matched = True
try:
txmbrs = self.selectEnvironment(group.environmentid,
upgrade=upgrade)
except yum.Errors.GroupsError:
self.logger.critical(_('Warning: Environment group %s does not exist.'), group_string)
continue
else:
pkgs_used.extend(txmbrs)
groups = []
if pkg_grp:
groups = self.comps.return_groups(group_string)
for group in groups:
group_matched = True
try:
txmbrs = self.selectGroup(group.groupid, upgrade=upgrade)
except yum.Errors.GroupsError:
self.logger.critical(_('Warning: Package group %s does not exist.'), group_string)
continue
else:
pkgs_used.extend(txmbrs)
if not group_matched:
self.logger.error(_('Warning: group %s does not exist.'), group_string)
continue
done = True
if not pkgs_used:
if self.conf.group_command == 'objects':
self.logger.critical(_("Maybe run: yum groups mark install (see man yum)"))
exit_status = 1
if upgrade:
# upgrades don't fail
exit_status = 0
if done:
# at least one group_string was a valid group
exit_status = 0
return exit_status, [_('No packages in any requested group available to install or update')]
else:
return 2, [P_('%d package to Install', '%d packages to Install', len(pkgs_used)) % len(pkgs_used)]
def removeGroups(self, grouplist):
"""Mark the packages in the given groups for removal.
:param grouplist: a list of names or wildcards specifying
groups to be removed
:return: (exit_code, [ errors ])
exit_code is::
0 = we're done, exit
1 = we've errored, exit with error string
2 = we've got work yet to do, onto the next stage
"""
pkgs_used = []
for group_string in grouplist:
grp_grp = True
pkg_grp = True
if group_string.startswith('@^'):
pkg_grp = False
group_string = group_string[2:]
elif group_string.startswith('@'):
grp_grp = False
group_string = group_string[1:]
groups = []
if grp_grp:
groups = self.comps.return_environments(group_string)
if not groups:
self.logger.critical(_('No environment named %s exists'), group_string)
for group in groups:
try:
txmbrs = self.environmentRemove(group.environmentid)
except yum.Errors.GroupsError:
continue
else:
pkgs_used.extend(txmbrs)
groups = []
if pkg_grp:
groups = self.comps.return_groups(group_string)
if not groups:
self.logger.critical(_('No group named %s exists'), group_string)
for group in groups:
try:
txmbrs = self.groupRemove(group.groupid)
except yum.Errors.GroupsError:
continue
else:
pkgs_used.extend(txmbrs)
if not pkgs_used:
if self.conf.group_command == 'objects':
self.logger.critical(_("Maybe run: yum groups mark remove (see man yum)"))
return 0, [_('No packages to remove from groups')]
else:
return 2, [P_('%d package to remove', '%d packages to remove', len(pkgs_used)) % len(pkgs_used)]
def _promptWanted(self):
# shortcut for the always-off/always-on options
if (self.conf.assumeyes or self.conf.downloadonly) and not self.conf.assumeno:
return False
if self.conf.alwaysprompt:
return True
# prompt if:
# package was added to fill a dependency
# package is being removed
# package wasn't explicitly given on the command line
for txmbr in self.tsInfo.getMembers():
if txmbr.isDep or \
txmbr.name not in self.extcmds:
return True
# otherwise, don't prompt
return False
def usage(self):
"""Print out an explanation of command line usage."""
sys.stdout.write(self.optparser.format_help())
def shellUsage(self):
"""Print out an explanation of the shell usage."""
sys.stdout.write(self.optparser.get_usage())
def _installable(self, pkg, ematch=False):
"""check if the package is reasonably installable, true/false"""
exactarchlist = self.conf.exactarchlist
# we look through each returned possibility and rule out the
# ones that we obviously can't use
if self.rpmdb.contains(po=pkg):
self.verbose_logger.log(yum.logginglevels.DEBUG_3,
_('Package %s is already installed, skipping'), pkg)
return False
# everything installed that matches the name
installedByKey = self.rpmdb.searchNevra(name=pkg.name)
comparable = []
for instpo in installedByKey:
if isMultiLibArch(instpo.arch) == isMultiLibArch(pkg.arch):
comparable.append(instpo)
else:
self.verbose_logger.log(yum.logginglevels.DEBUG_3,
_('Discarding non-comparable pkg %s.%s'), instpo.name, instpo.arch)
continue
# go through each package
if len(comparable) > 0:
for instpo in comparable:
if pkg.verGT(instpo): # we're newer - this is an update, pass to them
if instpo.name in exactarchlist:
if pkg.arch == instpo.arch:
return True
else:
return True
elif pkg.verEQ(instpo): # same, ignore
return False
elif pkg.verLT(instpo): # lesser, check if the pkgtup is an exactmatch
# if so then add it to be installed
# if it can be multiply installed
# this is where we could handle setting
# it to be an 'oldpackage' revert.
if ematch and self.allowedMultipleInstalls(pkg):
return True
else: # we've not got any installed that match n or n+a
self.verbose_logger.log(yum.logginglevels.DEBUG_1, _('No other %s installed, adding to list for potential install'), pkg.name)
return True
return False
class YumOptionParser(OptionParser):
"""Subclass that makes some minor tweaks to make OptionParser do things the
"yum way".
"""
def __init__(self,base, **kwargs):
# check if this is called with a utils=True/False parameter
if 'utils' in kwargs:
self._utils = kwargs['utils']
del kwargs['utils']
else:
self._utils = False
OptionParser.__init__(self, **kwargs)
self.logger = logging.getLogger("yum.cli")
self.base = base
self.plugin_option_group = OptionGroup(self, _("Plugin Options"))
self.add_option_group(self.plugin_option_group)
self._addYumBasicOptions()
def error(self, msg):
"""Output an error message, and exit the program. This method
is overridden so that error output goes to the logger.
:param msg: the error message to output
"""
self.print_usage()
self.logger.critical(_("Command line error: %s"), msg)
sys.exit(1)
def firstParse(self,args):
"""Parse only command line options that affect basic yum
setup.
:param args: a list of command line options to parse
:return: a dictionary containing the values of command line
options
"""
try:
args = _filtercmdline(
('--noplugins','--version','-q', '-v', "--quiet", "--verbose"),
('-c', '--config', '-d', '--debuglevel',
'-e', '--errorlevel',
'--installroot',
'--disableplugin', '--enableplugin', '--releasever',
'--setopt'),
args)
except ValueError, arg:
self.base.usage()
print >> sys.stderr, (_("\n\n%s: %s option requires an argument") %
('Command line error', arg))
sys.exit(1)
return self.parse_args(args=args)[0]
@staticmethod
def _splitArg(seq):
""" Split all strings in seq, at "," and whitespace.
Returns a new list. """
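        # e.g. _splitArg(['a,b', 'c  d']) returns ['a', 'b', 'c', 'd'].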
ret = []
for arg in seq:
ret.extend(arg.replace(",", " ").split())
return ret
def setupYumConfig(self, args=None):
"""Parse command line options.
:param args: the command line arguments entered by the user
:return: (opts, cmds) opts is a dictionary containing
the values of command line options. cmds is a list of the
command line arguments that were not parsed as options.
For example, if args is ["install", "foo", "--verbose"],
cmds will be ["install", "foo"].
"""
if not args:
(opts, cmds) = self.parse_args()
else:
(opts, cmds) = self.parse_args(args=args)
# Let the plugins know what happened on the command line
self.base.plugins.setCmdLine(opts, cmds)
try:
# config file is parsed and moving us forward
# set some things in it.
if opts.tolerant or self.base.conf.tolerant: # Make it slower capt.
self.base.conf.recheck_installed_requires = True
# Handle remaining options
if opts.assumeyes:
self.base.conf.assumeyes = 1
if opts.assumeno:
self.base.conf.assumeno = 1
self.base.conf.downloadonly = opts.dlonly
self.base.conf.downloaddir = opts.dldir
# Store all the updateinfo filters somewhere...
self.base.updateinfo_filters['security'] = opts.security
self.base.updateinfo_filters['bugfix'] = opts.bugfix
self.base.updateinfo_filters['advs'] = self._splitArg(opts.advs)
self.base.updateinfo_filters['bzs'] = self._splitArg(opts.bzs)
self.base.updateinfo_filters['cves'] = self._splitArg(opts.cves)
self.base.updateinfo_filters['sevs'] = self._splitArg(opts.sevs)
# Treat users like root as much as possible:
if not self.base.setCacheDir():
self.base.conf.cache = 1
if opts.cacheonly:
self.base.conf.cache = 1
if opts.obsoletes:
self.base.conf.obsoletes = 1
if opts.installroot:
self._checkAbsInstallRoot(opts)
self.base.conf.installroot = opts.installroot
if opts.skipbroken:
self.base.conf.skip_broken = True
if opts.showdupesfromrepos:
self.base.conf.showdupesfromrepos = True
if opts.color not in (None, 'auto', 'always', 'never',
'tty', 'if-tty', 'yes', 'no', 'on', 'off'):
raise ValueError, _("--color takes one of: auto, always, never")
elif opts.color is None:
if self.base.conf.color != 'auto':
self.base.term.reinit(color=self.base.conf.color)
else:
_remap = {'tty' : 'auto', 'if-tty' : 'auto',
'1' : 'always', 'true' : 'always',
'yes' : 'always', 'on' : 'always',
                          '0' : 'never', 'false' : 'never',
'no' : 'never', 'off' : 'never'}
opts.color = _remap.get(opts.color, opts.color)
if opts.color != 'auto':
self.base.term.reinit(color=opts.color)
self.base.conf.disable_excludes = self._splitArg(opts.disableexcludes)
self.base.conf.disable_includes = self._splitArg(opts.disableincludes)
for exclude in self._splitArg(opts.exclude):
try:
excludelist = self.base.conf.exclude
excludelist.append(exclude)
self.base.conf.exclude = excludelist
except yum.Errors.ConfigError, e:
self.logger.critical(e)
self.base.usage()
sys.exit(1)
if opts.rpmverbosity is not None:
self.base.conf.rpmverbosity = opts.rpmverbosity
# setup the progress bars/callbacks
self.base.setupProgressCallbacks()
# setup the callbacks to import gpg pubkeys and confirm them
self.base.setupKeyImportCallbacks()
# Process repo enables and disables in order
for opt, repoexp in opts.repos:
try:
if opt == '--enablerepo':
self.base.repos.enableRepo(repoexp)
elif opt == '--disablerepo':
self.base.repos.disableRepo(repoexp)
except yum.Errors.ConfigError, e:
self.logger.critical(e)
self.base.usage()
sys.exit(1)
# Disable all gpg key checking, if requested.
if opts.nogpgcheck:
                # Altering the normal configs doesn't work too well, esp. with
# regard to dynamically enabled repos.
self.base._override_sigchecks = True
for repo in self.base.repos.listEnabled():
repo._override_sigchecks = True
except ValueError, e:
self.logger.critical(_('Options error: %s'), e)
self.base.usage()
sys.exit(1)
return opts, cmds
def _checkAbsInstallRoot(self, opts):
if not opts.installroot:
return
if opts.installroot[0] == '/':
return
# We have a relative installroot ... haha
self.logger.critical(_('--installroot must be an absolute path: %s'),
opts.installroot)
sys.exit(1)
def getRoot(self,opts):
"""Return the root location to use for the yum operation.
This location can be changed by using the --installroot
option.
:param opts: a dictionary containing the values of the command
line options
:return: a string representing the root location
"""
self._checkAbsInstallRoot(opts)
# If the conf file is inside the installroot - use that.
# otherwise look for it in the normal root
if opts.installroot and opts.installroot.lstrip('/'):
if os.access(opts.installroot+'/'+opts.conffile, os.R_OK):
opts.conffile = opts.installroot+'/'+opts.conffile
elif opts.conffile == '/etc/yum/yum.conf':
# check if /installroot/etc/yum.conf exists.
if os.access(opts.installroot+'/etc/yum.conf', os.R_OK):
opts.conffile = opts.installroot+'/etc/yum.conf'
root=opts.installroot
else:
root = '/'
return root
def _wrapOptParseUsage(self, opt, value, parser, *args, **kwargs):
self.base.usage()
self.exit()
def _addYumBasicOptions(self):
def repo_optcb(optobj, opt, value, parser):
'''Callback for the enablerepo and disablerepo option.
Combines the values given for these options while preserving order
from command line.
'''
            dest = getattr(parser.values, optobj.dest)
dest.append((opt, value))
if self._utils:
group = OptionGroup(self, "Yum Base Options")
self.add_option_group(group)
else:
group = self
        # Note that we can't use the default action="help" because
        # print_help() unconditionally does .encode(), which is bad on
        # unicode input.
group.conflict_handler = "resolve"
group.add_option("-h", "--help", action="callback",
callback=self._wrapOptParseUsage,
help=_("show this help message and exit"))
group.conflict_handler = "error"
group.add_option("-t", "--tolerant", action="store_true",
help=_("be tolerant of errors"))
group.add_option("-C", "--cacheonly", dest="cacheonly",
action="store_true",
help=_("run entirely from system cache, don't update cache"))
group.add_option("-c", "--config", dest="conffile",
default='/etc/yum/yum.conf',
help=_("config file location"), metavar='[config file]')
group.add_option("-R", "--randomwait", dest="sleeptime", type='int',
default=None,
help=_("maximum command wait time"), metavar='[minutes]')
group.add_option("-d", "--debuglevel", dest="debuglevel", default=None,
help=_("debugging output level"), type='int',
metavar='[debug level]')
group.add_option("--showduplicates", dest="showdupesfromrepos",
action="store_true",
help=_("show duplicates, in repos, in list/search commands"))
group.add_option("--show-duplicates", dest="showdupesfromrepos",
action="store_true",
help=SUPPRESS_HELP)
group.add_option("-e", "--errorlevel", dest="errorlevel", default=None,
help=_("error output level"), type='int',
metavar='[error level]')
| group.add_option("", "--rpmverbosity", default=None, | 8,933 | lcc_e | python | null | 1fc52a605c2339692497389ae8415624af2aa9f420e6efab |
|
# (C) Copyright 2015-2021 Sei Lisa. All rights reserved.
#
# This file is part of LSL PyOptimizer.
#
# LSL PyOptimizer is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# LSL PyOptimizer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LSL PyOptimizer. If not, see <http://www.gnu.org/licenses/>.
# Parser module. Converts the source into an abstract syntax tree,
# generating also the symbol table.
# TODO: Add info to be able to propagate error position to the source.
from lslopt.lslcommon import Key, Vector, Quaternion, types, nr
from lslopt import lslcommon, lslfuncs
from strutil import *
strutil_used  # presumably referenced to mark the wildcard import as used
import re
# Note this module was basically written from bottom to top, which may help
# reading it.
WHITESPACE_CHARS = frozenset({' ', '\r', '\n', '\x0B', '\x0C'})
SINGLE_SYMBOLS = frozenset({'.', ';', '{', '}', ',', '=', '(', ')', '-', '+',
'*', '/', '%', '@', ':', '<', '>', '[', ']', '&', '|', '^', '~', '!'})
def isdigit(c):
return '0' <= c <= '9'
def isalpha_(c):
return c == '_' or 'A' <= c <= 'Z' or 'a' <= c <= 'z'
def isalphanum_(c):
return isalpha_(c) or isdigit(c)
def ishex(c):
return '0' <= c <= '9' or 'A' <= c <= 'F' or 'a' <= c <= 'f'
def GetErrLineCol(parser):
errorpos = parser.errorpos
# Find zero-based line number
lno = parser.script.count('\n', 0, errorpos)
# Find start of current line
lstart = parser.script.rfind('\n', 0, errorpos) + 1
# Find zero-based column number in characters
cno = len(any2u(parser.script[lstart:errorpos], 'utf8'))
# Find in #line directives list
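    # Each parser.linedir entry is a (physical_line, reported_line, filename)
    # tuple, where physical_line is the zero-based line of the '#line'
    # directive itself (see ProcessDirective).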
i = len(parser.linedir)
filename = '<stdin>' # value to return if there's no #line before lno
while i:
i -= 1
line = parser.linedir[i]
# We wouldn't know where to report the error in this case:
assert lno != line[0], \
"Error position is in processed #line directive?!"
if line[0] < lno: # found the last #line directive before lno
# replace the value of lno
lno = lno - line[0] + line[1] - 2
filename = line[2]
break
return (lno + 1, cno + 1, filename)
class EParse(Exception):
def __init__(self, parser, msg):
self.errorpos = parser.errorpos
self.lno, self.cno, self.fname = GetErrLineCol(parser)
filename = self.fname
if parser.emap and filename == '<stdin>':
filename = parser.filename
filename = (str2u(filename, 'utf8')
.replace(u'\\', u'\\\\')
.replace(u'"', u'\\"')
)
if parser.emap:
msg = u'::ERROR::"%s":%d:%d: %s' % (
any2u(filename.lstrip('u')), self.lno, self.cno, msg)
elif parser.processpre and filename != '<stdin>':
msg = u"(Line %d char %d): ERROR in \"%s\": %s" % (
self.lno, self.cno, filename, msg)
else:
msg = u"(Line %d char %d): ERROR: %s" % (self.lno, self.cno, msg)
super(EParse, self).__init__(msg)
class EParseUEOF(EParse):
def __init__(self, parser):
parser.errorpos = len(parser.script)
super(EParseUEOF, self).__init__(parser, u"Unexpected EOF")
class EParseSyntax(EParse):
def __init__(self, parser):
super(EParseSyntax, self).__init__(parser, u"Syntax error")
class EParseAlreadyDefined(EParse):
def __init__(self, parser):
super(EParseAlreadyDefined, self).__init__(parser,
u"Name previously declared within scope")
class EParseUndefined(EParse):
def __init__(self, parser):
super(EParseUndefined, self).__init__(parser,
u"Name not defined within scope")
class EParseTypeMismatch(EParse):
def __init__(self, parser):
super(EParseTypeMismatch, self).__init__(parser, u"Type mismatch")
class EParseReturnShouldBeEmpty(EParse):
def __init__(self, parser):
        # When the types don't match, the error is EParseTypeMismatch instead.
super(EParseReturnShouldBeEmpty, self).__init__(parser,
u"Return statement type doesn't match function return type")
class EParseReturnIsEmpty(EParse):
def __init__(self, parser):
super(EParseReturnIsEmpty, self).__init__(parser,
u"Function returns a value but return statement doesn't")
# This error message may sound funny, for good reasons.
class EParseInvalidField(EParse):
def __init__(self, parser):
super(EParseInvalidField, self).__init__(parser,
u"Use of vector or quaternion method on incorrect type")
class EParseFunctionMismatch(EParse):
def __init__(self, parser):
super(EParseFunctionMismatch, self).__init__(parser,
u"Function call mismatches type or number of arguments")
class EParseDeclarationScope(EParse):
def __init__(self, parser):
super(EParseDeclarationScope, self).__init__(parser,
u"Declaration requires a new scope -- use { and }")
class EParseCantChangeState(EParse):
def __init__(self, parser):
super(EParseCantChangeState, self).__init__(parser,
u"Global functions can't change state")
class EParseCodePathWithoutRet(EParse):
def __init__(self, parser):
super(EParseCodePathWithoutRet, self).__init__(parser,
u"Not all code paths return a value")
class EParseDuplicateLabel(EParse):
def __init__(self, parser):
super(EParseDuplicateLabel, self).__init__(parser,
u"Duplicate local label name. That won't allow the Mono script"
u" to be saved, and will not work as expected in LSO.")
class EParseInvalidCase(EParse):
def __init__(self, parser, kind):
super(EParseInvalidCase, self).__init__(parser,
u"'%s' used outside a 'switch' statement" % kind)
class EParseCaseNotAllowed(EParse):
def __init__(self, parser, kind):
super(EParseCaseNotAllowed, self).__init__(parser,
u"'%s' label only allowed at the main 'switch' block" % kind)
class EParseManyDefaults(EParse):
def __init__(self, parser):
super(EParseManyDefaults, self).__init__(parser,
u"multiple 'default' labels inside 'switch' statement")
class EParseMissingDefault(EParse):
def __init__(self, parser):
super(EParseMissingDefault, self).__init__(parser,
u"Missing 'default:' label inside 'switch' statement; disable"
u" option 'errmissingdefault' to disable this error.")
class EParseInvalidBreak(EParse):
def __init__(self, parser):
super(EParseInvalidBreak, self).__init__(parser,
u"'break' used outside a loop or switch"
if parser.enableswitch and parser.breakcont
else u"'break' used outside a switch" if parser.enableswitch
else u"'break' used outside a loop")
class EParseInvalidCont(EParse):
def __init__(self, parser):
super(EParseInvalidCont, self).__init__(parser,
u"'continue' used outside a loop")
class EParseInvalidBrkContArg(EParse):
def __init__(self, parser):
super(EParseInvalidBrkContArg, self).__init__(parser,
u"Invalid argument to 'break' or 'continue'" if parser.breakcont
else u"Invalid argument to 'break'")
class EParseInvalidBackslash(EParse):
def __init__(self, parser):
super(EParseInvalidBackslash, self).__init__(parser,
u"Preprocessor directive can't end in backslash."
u" Activate the preprocessor or put everything in the same line.")
class EParseInvalidLabelOpt(EParse):
def __init__(self, parser):
super(EParseInvalidLabelOpt, self).__init__(parser,
u"When optimization is active, a label can't be the immediate"
u" child of a 'for', 'if', 'while' or 'do'. Disable optimization"
u" or rewrite the code in some other way.")
class EParseNoConversion(EParse):
def __init__(self, parser):
super(EParseNoConversion, self).__init__(parser,
u"There's no conversion function in the library for this type")
class EInternal(Exception):
"""This exception is a construct to allow a different function to cause an
immediate return of EOF from parser.GetToken().
"""
pass
class parser(object):
assignment_toks = frozenset({'=', '+=', '-=', '*=', '/=', '%='})
extassignment_toks = frozenset({'|=', '&=', '^=', '<<=', '>>='})
double_toks = frozenset({'++', '--', '+=', '-=', '*=', '/=', '%=', '==',
'!=', '>=', '<=', '&&', '||', '<<', '>>'})
extdouble_toks = frozenset({'|=', '&=', '^='})
# These are hardcoded because additions or modifications imply
# important changes to the code anyway.
base_keywords = frozenset({'default', 'state', 'event', 'jump', 'return',
'if', 'else', 'for', 'do', 'while', 'print', 'TRUE', 'FALSE'})
brkcont_keywords = frozenset({'break', 'continue'})
switch_keywords = frozenset({'switch', 'case', 'break', 'default'})
PythonType2LSLToken = {int:'INTEGER_VALUE', float:'FLOAT_VALUE',
unicode:'STRING_VALUE', Key:'KEY_VALUE', Vector:'VECTOR_VALUE',
Quaternion:'ROTATION_VALUE', list:'LIST_VALUE'}
TypeToExtractionFunction = {}
# Utility function
def GenerateLabel(self):
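        # Illustrative: assuming labelcnt starts at 0, successive calls yield
        # 'J_autoGen00001', 'J_autoGen00002', ..., skipping any name already
        # present in self.locallabels.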
while True:
self.labelcnt += 1
unique = 'J_autoGen%05d' % self.labelcnt
if unique not in self.locallabels:
break
self.locallabels.add(unique)
return unique
def PushScope(self):
"""Create a new symbol table / scope level"""
self.scopeindex = len(self.symtab)
self.symtab.append({}) # Add new symbol table
self.scopestack.append(self.scopeindex)
def PopScope(self):
"""Return to the previous scope level"""
assert self.scopeindex == self.scopestack[-1]
self.scopestack.pop()
self.scopeindex = self.scopestack[-1]
assert len(self.scopestack) > 0
def AddSymbol(self, kind, scope, name, **values):
values['Kind'] = kind
if kind in ('v', 'l'):
values['Scope'] = scope
self.symtab[scope][name] = values
def FindSymbolPartial(self, symbol, MustBeLabel = False):
"""Find a symbol in all visible scopes in order, but not in the full
globals table (only globals seen so far are visible).
Labels have special scope rules: other identifiers with the same
name that are not labels are invisible to JUMP statements. Example:
default{timer(){ @x; {integer x; jump x;} }}
finds the label at the outer block. However:
default{timer(){ @x; integer x; }}
gives an identifier already defined error. On the other hand, labels
hide other types (but that's dealt with in the caller to this function):
default{timer(){ integer a; { @a; a++; } }}
        gives a Name Not Defined error.
"""
scopelevel = len(self.scopestack)
while scopelevel:
scopelevel -= 1
symtab = self.symtab[self.scopestack[scopelevel]]
if symbol in symtab and (not MustBeLabel
or symtab[symbol]['Kind'] == 'l'):
return symtab[symbol]
return None
# No labels or states allowed here (but functions are)
def FindSymbolFull(self, symbol, globalonly=False):
"""Returns the symbol table entry for the given symbol."""
scopelevel = 1 if globalonly else len(self.scopestack)
while scopelevel: # Loop over all scopes in the stack
scopelevel -= 1
symtab = self.symtab[self.scopestack[scopelevel]]
if symbol in symtab:
# This can't happen, as functions can't be local
#if len(symtab[symbol]) > 3:
# return (symtab[symbol][1], symtab[symbol][3])
return symtab[symbol]
try:
return self.symtab[0][symbol] # Quick guess
except KeyError:
if (self.disallowglobalvars and symbol not in self.symtab[0]
or symbol not in self.globals
):
return None # Disallow forwards in global var mode
return self.globals[symbol]
def ValidateField(self, typ, field):
if typ == 'vector' and field in ('x', 'y', 'z') \
or typ == 'rotation' and field in ('x', 'y', 'z', 's'):
return
raise EParseInvalidField(self)
def autocastcheck(self, value, tgttype):
"""Check if automatic dynamic cast is possible. If explicit casts are
requested, insert one.
"""
tval = value.t
if tval == tgttype:
return value
if tval in ('string', 'key') and tgttype in ('string', 'key') \
or tval == 'integer' and tgttype == 'float':
if self.explicitcast:
return nr(nt='CAST', t=tgttype, ch=[value])
return value
raise EParseTypeMismatch(self)
def ueof(self):
"""Check for unexpected EOF"""
if self.pos >= self.length:
raise EParseUEOF(self)
def ceof(self):
"""Check for normal EOF"""
if self.pos >= self.length:
raise EInternal() # force GetToken to return EOF
def SetOpt(self, option, value):
# See parse() for meaning of options.
if option == 'extendedglobalexpr':
self.extendedglobalexpr = value
if option == 'extendedtypecast':
self.extendedtypecast = value
if option == 'extendedassignment':
self.extendedassignment = value
if option == 'explicitcast':
self.explicitcast = value
if option == 'allowkeyconcat':
self.allowkeyconcat = value
if option == 'allowmultistrings':
self.allowmultistrings = value
if option == 'processpre':
self.processpre = value
# TODO: Allow pure C-style string escapes. This is low-priority.
#if option == 'allowcescapes':
# self.allowcescapes = value
# Enable switch statements.
if option == 'enableswitch':
if not self.enableswitch and value:
self.keywords |= self.switch_keywords
elif self.enableswitch and not value:
self.keywords = self.base_keywords.copy()
if self.breakcont:
self.keywords |= self.brkcont_keywords
self.enableswitch = value
# Enable break/continue
if option == 'breakcont':
if not self.breakcont and value:
self.keywords |= self.brkcont_keywords
elif self.breakcont and not value:
self.keywords = self.base_keywords.copy()
if self.enableswitch:
self.keywords |= self.switch_keywords
self.breakcont = value
if option == 'errmissingdefault':
self.errmissingdefault = value
if option == 'lazylists':
self.lazylists = value
if option == 'duplabels':
self.duplabels = value
if option == 'shrinknames':
self.shrinknames = value
if option == 'funcoverride':
self.funcoverride = value
if option == 'inline':
self.enable_inline = value
def ProcessDirective(self, directive):
"""Process a given preprocessor directive during parsing."""
# Ignore directives on the first pass
if self.scanglobals:
return
if directive[len(directive)-1:] == '\\':
raise EParseInvalidBackslash(self)
# compile the RE lazily, to avoid penalizing programs not using it
if self.parse_directive_re is None:
self.parse_directive_re = re.compile(
r'^#\s*(?:'
r'(?:line)?\s+(\d+)(?:\s+("(?:\\.|[^"])*")(?:\s+\d+)*)?'
r'|'
r'(?:pragma)\s+(?:OPT)\s+([-+,a-z0-9_]+)'
r'|'
r'([a-z0-9_]+)(?:\s+(.*)?)' # others
r')\s*$'
, re.I
)
match = self.parse_directive_re.search(directive)
if match is not None:
# Something parsed
if match.group(1) is not None:
#line directive
if match.group(2) is not None:
# filename included
if match.group(2).find('\\') != -1:
# interpret escapes
from ast import literal_eval
filename = literal_eval(match.group(2))
else:
filename = match.group(2)[1:-1]
self.lastFILE = filename
else:
filename = self.lastFILE
# Referenced line number (in the #line directive)
reflinenum = int(match.group(1))
# Actual line number (where the #line directive itself is)
# FIXME: this is O(n^2); track line number instead of this hack
actlinenum = self.script.count('\n', 0, self.pos)
self.linedir.append((actlinenum, reflinenum, filename))
del actlinenum, reflinenum, filename
elif match.group(3): # '#pragma OPT <options>' found
opts = match.group(3).lower().split(',')
for opt in opts:
if opt != '':
if opt[0] == '-':
self.SetOpt(opt[1:], False)
elif opt[0] == '+':
self.SetOpt(opt[1:], True)
else:
self.SetOpt(opt, True)
elif match.group(4) == 'warning':
if match.group(5):
warning("Warning: #warning " + match.group(5))
else:
warning("Warning: #warning")
# else ignore
def GetToken(self):
"""Lexer"""
try:
while self.pos < self.length:
# In case of error, report it at the start of this token.
self.errorpos = self.pos
c = self.script[self.pos]
self.pos += 1
# Process preprocessor directives
if self.processpre and self.linestart and c == '#':
# Preprocessor directive.
# Most are not supposed to reach us but some do:
# - gcpp generates lines in the output like:
# # 123 "file.lsl"
# - other preprocessors including Boost Wave and mcpp
# generate lines like:
# #line 123 "file.lsl"
# Firestorm comments these out and instead outputs
# //#line 123 "file.lsl"
# - #pragma directives
# - #define directives from mcpp's #pragma MCPP put_defines
# or from gcpp's -dN option, that we use to detect some
# definitions.
self.ceof()
while self.script[self.pos] != '\n':
self.pos += 1
self.ceof() # A preprocessor command at EOF is not unexpected EOF.
self.ProcessDirective(self.script[self.errorpos:self.pos])
self.pos += 1
self.ceof()
continue
# Process comments
if c == '/':
if self.script[self.pos:self.pos+1] == '/':
self.pos += 1
if self.enable_inline and self.script.startswith(
'pragma inline', self.pos
) and not isalphanum_(self.script[self.pos + 13:
self.pos + 14]
):
self.pos += 12 # len('pragma inline') - 1
while self.script[self.pos] != '\n':
self.pos += 1
# Check for normal EOF. Note: 'inline' is not
# inserted if the file ends before a newline.
self.ceof()
return ('IDENT', 'inline')
self.ceof()
while self.script[self.pos] != '\n':
self.pos += 1
self.ceof() # A single-line comment at EOF is not unexpected EOF.
self.linestart = True
self.pos += 1
self.ceof()
continue
elif self.script[self.pos:self.pos+1] == '*':
self.pos += 2
if self.enable_inline and self.script.startswith(
'pragma inline*/', self.pos-1):
self.pos += 14 # len('pragma inline*/') - 1
return ('IDENT', 'inline')
while self.script[self.pos-1:self.pos+1] != '*/':
self.pos += 1
self.ueof() # An unterminated multiline comment *is* unexpected EOF.
self.pos += 1
self.ceof()
continue
# self.linestart is related to the preprocessor, therefore we
# check the characters that are relevant for standard C.
if c not in WHITESPACE_CHARS:
self.linestart = False
# Process strings
if c == '"' or c == 'L' and self.script[self.pos:self.pos+1] == '"':
strliteral = ''
if c == 'L':
self.pos += 1
strliteral = '"'
savepos = self.pos # we may need to backtrack
is_string = True # by default
while self.script[self.pos:self.pos+1] != '"':
# per the grammar, on EOF, it's not considered a string
if self.pos >= self.length:
self.pos = savepos
is_string = False
break
if self.script[self.pos] == '\\':
self.pos += 1
self.ueof()
if self.script[self.pos] == 'n':
strliteral += '\n'
elif self.script[self.pos] == 't':
strliteral += ' '
elif self.script[self.pos] == '\n':
# '\' followed by a newline; it's not a string.
self.pos = savepos
is_string = False
self.linestart = True
break
else:
strliteral += self.script[self.pos]
else:
strliteral += self.script[self.pos]
self.pos += 1
if is_string:
self.pos += 1
return ('STRING_VALUE', lslfuncs.zstr(str2u(strliteral, 'utf8')))
# fall through (to consider the L or to ignore the ")
if isalpha_(c):
# Identifier or reserved
ident = c
while isalphanum_(self.script[self.pos:self.pos+1]):
ident += self.script[self.pos]
self.pos += 1
# Got an identifier - check if it's a reserved word
if ident in self.keywords:
return (ident.upper(),)
if ident in types:
if ident == 'quaternion':
ident = 'rotation' # Normalize types
return ('TYPE',ident)
if ident in self.events:
return ('EVENT_NAME',ident)
if ident in self.constants:
value = self.constants[ident]
return (self.PythonType2LSLToken[type(value)], value)
return ('IDENT', ident)
# Process numbers: float, hex integer, dec integer
if c == '.' or isdigit(c):
number = ''
if c != '.':
# We have a digit, which means we have for sure either
# an integer or a float.
# Eat as many decimal digits as possible
number = c
while isdigit(self.script[self.pos:self.pos+1]):
number += self.script[self.pos]
self.pos += 1
if number == '0' and self.script[self.pos:self.pos+1] in ('x','X') \
and ishex(self.script[self.pos+1:self.pos+2]):
# We don't need the 0x prefix.
self.pos += 1
# Eat leading zeros to know the real length.
while self.script[self.pos:self.pos+1] == '0':
self.pos += 1
number = ''
while ishex(self.script[self.pos:self.pos+1]):
if len(number) < 9: # don't let it grow more than necessary
number += self.script[self.pos]
self.pos += 1
if number == '':
# We know there was at least a valid digit so it
# must've been all zeros.
number = '0'
if len(number) > 8:
number = -1
else:
number = lslfuncs.S32(int(number, 16))
return ('INTEGER_VALUE', number)
# Add the dot if present
if self.script[self.pos:self.pos+1] == '.':
number += '.'
self.pos += 1
else:
number = c
while isdigit(self.script[self.pos:self.pos+1]):
number += self.script[self.pos]
self.pos += 1
# At this point, number contains as many digits as there are before the dot,
# the dot if present, and as many digits as there are after the dot.
if number != '.': # A dot alone can't be a number so we rule it out here.
exp = ''
if self.script[self.pos:self.pos+1] in ('e','E'):
epos = self.pos # Temporary position tracker, made permanent only if the match succeeds
exp = self.script[epos]
epos += 1
if self.script[epos:epos+1] in ('+','-'):
exp += self.script[epos]
epos += 1
if isdigit(self.script[epos:epos+1]):
# Now we *do* have an exponent.
exp += self.script[epos]
epos += 1
while isdigit(self.script[epos:epos+1]):
exp += self.script[epos]
epos += 1
self.pos = epos # "Commit" the new position
else:
exp = '' # No cigar. Rollback and backtrack. Invalidate exp.
if exp != '' or '.' in number: # Float
if '.' in number:
# Eat the 'F' if present
if self.script[self.pos:self.pos+1] in ('f','F'):
# Python doesn't like the 'F' so don't return it
#exp += self.script[self.pos]
self.pos += 1
return ('FLOAT_VALUE', lslfuncs.F32(float(number + exp)))
if len(number) > 10 or len(number) == 10 and number > '4294967295':
number = -1
else:
number = lslfuncs.S32(int(number))
return ('INTEGER_VALUE', number)
if self.script[self.pos-1:self.pos+1] in self.double_toks \
or self.extendedassignment and self.script[self.pos-1:self.pos+1] in self.extdouble_toks:
self.pos += 1
if self.extendedassignment and self.script[self.pos-2:self.pos+1] in ('<<=', '>>='):
self.pos += 1
return (self.script[self.pos-3:self.pos],)
return (self.script[self.pos-2:self.pos],)
if c in SINGLE_SYMBOLS:
return (c,)
if c == '\n':
self.linestart = True
# We eat spacers AND any other character, so the following is not needed,
# although the lex file includes it (the lex file does not count() invalid characters
# for the purpose of error reporting).
#if c in ' \n\r\x0B':
# continue
except EInternal:
pass # clear the exception and fall through
return ('EOF',)
def NextToken(self):
"""Calls GetToken and sets the internal token."""
self.tok = self.GetToken()
# Recursive-descendent parser. The result is an AST and a symbol table.
def expect(self, toktype):
"""Raise exception if the current token is not the given one."""
if self.tok[0] != toktype:
if self.tok[0] == 'EOF':
raise EParseUEOF(self)
raise EParseSyntax(self)
def does_something(self, blk):
"""Tell if a list of nodes does something or is just empty statements
(a pure combination of ';' and '{}' and '@')
"""
for node in blk:
if '@' != node.nt != ';':
if node.nt == '{}':
if self.does_something(node.ch):
return True
else:
return True
return False
def Parse_vector_rotation_tail(self):
"""(See Parse_unary_postfix_expression for context)
To our advantage, the precedence of the closing '>' in a vector or
rotation literal is that of an inequality. Our strategy will thus be
to perform the job of an inequality, calling the lower level 'shift'
rule and building the inequalities if they are not '>'. When we find a
'>', we check whether the next token makes sense as beginning an
inequality; if not, we finally close the vector or rotation.
But first, a quaternion _may_ have a full expression at the third
component, so we tentatively parse this position as an expression, and
backtrack if it causes an error.
"""
ret = []
pos = self.pos
errorpos = self.errorpos
tok = self.tok
component3 = False
try:
component3 = self.Parse_expression()
# Checking here for '>' might parse a different grammar, because
# it might allow e.g. <1,2,3==3>; as a vector, which is not valid.
# Not too sure about that, but we're cautious and disable this
# just in case.
#if self.tok[0] == '>':
# return ret
self.expect(',')
self.NextToken()
except EParse: # The errors can be varied, e.g. <0,0,0>-v; raises EParseTypeMismatch
# Backtrack
self.pos = pos
self.errorpos = errorpos
self.tok = tok
# We do this here to prevent a type mismatch above
if component3 is not False:
ret.append(self.autocastcheck(component3, 'float'))
# OK, here we are.
inequality = self.Parse_shift() # shift is the descendant of inequality
while self.tok[0] in ('<', '<=', '>=', '>'):
op = self.tok[0]
self.NextToken()
if op == '>':
# Check if the current token can be a part of a comparison.
# If not, it's a vector/quaternion terminator.
if self.tok[0] not in (
# List adapted from this section of the bison report:
#state 570
#
# 176 expression: expression '>' . expression
# 214 quaternion_initializer: '<' expression ',' expression ',' expression ',' expression '>' .
'IDENT', 'INTEGER_VALUE', 'FLOAT_VALUE', 'STRING_VALUE',
'KEY_VALUE', 'VECTOR_VALUE', 'ROTATION_VALUE', 'LIST_VALUE',
'TRUE', 'FALSE', '++', '--', 'PRINT', '!', '~', '(', '['
):
ret.append(self.autocastcheck(inequality, 'float'))
return ret
# This is basically a copy/paste of the Parse_inequality handler
ltype = inequality.t
if ltype not in ('integer', 'float'):
raise EParseTypeMismatch(self)
rexpr = self.Parse_shift()
rtype = rexpr.t
if rtype not in ('integer', 'float'):
raise EParseTypeMismatch(self)
if ltype != rtype:
if rtype == 'float':
inequality = self.autocastcheck(inequality, rtype)
else:
rexpr = self.autocastcheck(rexpr, ltype)
inequality = nr(nt=op, t='integer', ch=[inequality, rexpr])
# Reaching this means an operator or lower precedence happened,
# e.g. <1,1,1,2==2> (that's syntax error in ==)
raise EParseSyntax(self)
def Parse_unary_postfix_expression(self, AllowAssignment = True):
"""Grammar parsed here:
unary_postfix_expression: TRUE | FALSE | LIST_VALUE
| INTEGER_VALUE | FLOAT_VALUE | '-' INTEGER_VALUE | '-' FLOAT_VALUE
| STRING_VALUE | KEY_VALUE | VECTOR_VALUE | ROTATION_VALUE
| vector_literal | rotation_literal | list_literal
| PRINT '(' expression ')' | IDENT '(' expression_list ')'
| lvalue '++' | lvalue '--' | assignment %if allowed
| IDENT '[' expression ']' '=' expression %if lazylists
| IDENT '[' expression ']' %if lazylists
| lvalue
vector_literal: '<' expression ',' expression ',' expression '>'
rotation_literal: '<' expression ',' expression ',' expression
',' expression '>'
list_literal: '[' optional_expression_list ']'
assignment: lvalue '=' expression | lvalue '+=' expression
| lvalue '-=' expression | lvalue '*=' expression
| lvalue '/=' expression | lvalue '%=' expression
| lvalue '|=' expression %if extendedassignment
| lvalue '&=' expression %if extendedassignment
| lvalue '<<=' expression %if extendedassignment
| lvalue '>>=' expression %if extendedassignment
lvalue: IDENT | IDENT '.' IDENT
"""
tok0 = self.tok[0]
val = self.tok[1] if len(self.tok) > 1 else None
CONST = 'CONST'
if tok0 == '-':
self.NextToken()
if self.tok[0] in ('INTEGER_VALUE', 'FLOAT_VALUE'):
val = self.tok[1]
self.NextToken()
return nr(nt=CONST, value=lslfuncs.neg(val),
t='integer' if type(val) == int else 'float')
raise EParseSyntax(self)
if tok0 == 'INTEGER_VALUE':
self.NextToken()
return nr(nt=CONST, t='integer', value=val)
if tok0 == 'FLOAT_VALUE':
self.NextToken()
return nr(nt=CONST, t='float', value=val)
if tok0 == 'STRING_VALUE':
self.NextToken()
if self.allowmultistrings:
while self.tok[0] == 'STRING_VALUE':
val += self.tok[1]
self.NextToken()
return nr(nt=CONST, t='string', value=val)
# Key constants are not currently supported - use string
#if tok0 == 'KEY_VALUE':
# return [CONST, 'key', val]
if tok0 == 'VECTOR_VALUE':
self.NextToken()
return nr(nt=CONST, t='vector', value=val)
if tok0 == 'ROTATION_VALUE':
self.NextToken()
return nr(nt=CONST, t='rotation', value=val)
if tok0 == 'LIST_VALUE':
self.NextToken()
return nr(nt=CONST, t='list', value=val)
if tok0 in ('TRUE', 'FALSE'):
self.NextToken()
return nr(nt=CONST, t='integer', value=1 if tok0 == 'TRUE' else 0)
if tok0 == '<':
self.NextToken()
saveAllowVoid = self.allowVoid
self.allowVoid = False
val = [self.autocastcheck(self.Parse_expression(), 'float')]
self.expect(',')
self.NextToken()
val.append(self.autocastcheck(self.Parse_expression(), 'float'))
self.expect(',')
self.NextToken()
# It would be cute if it were this simple:
#val.append(self.Parse_expression())
#if self.tok[0] == '>':
# self.NextToken()
# return ['VECTOR', 'vector'] + val
#self.expect(',')
#self.NextToken()
#val.append(self.Parse_inequality())
#self.expect('>')
#self.NextToken()
#return ['ROTATION', 'rotation'] + val
# Alas, it isn't. The closing angle bracket of a vector '>'
# conflicts with the inequality operator '>' in unexpected ways.
# Example: <2,2,2> * 2 would trigger the problem with that code:
# the expression parser would try to parse the inequality 2 > *2,
# choking at the *. To make things worse, LSL admits things such as
# <2,2,2 > 2> (but not things like <2,2,2 == 2> because the == has
# lower precedence than the '>' and thus it forces termination of
# the vector constant). And to make things even worse, it also
# admits things such as <2,2,2 == 2, 2> because the comma is not in
# the precedence scale, so it's quite complex to handle.
# We defer it to a separate function.
val += self.Parse_vector_rotation_tail()
self.allowVoid = saveAllowVoid
if len(val) == 3:
return nr(nt='VECTOR', t='vector', ch=val)
return nr(nt='ROTATION', t='rotation', ch=val)
if tok0 == '[':
self.NextToken()
val = self.Parse_optional_expression_list(False)
self.expect(']')
self.NextToken()
return nr(nt='LIST', t='list', ch=val)
if tok0 == 'PRINT':
self.NextToken()
self.expect('(')
self.NextToken()
saveAllowVoid = self.allowVoid
self.allowVoid = True
expr = self.Parse_expression()
self.allowVoid = saveAllowVoid
if expr.t not in types:
raise (EParseTypeMismatch(self) if expr.t is None
else EParseUndefined(self))
self.expect(')')
self.NextToken()
# Syntactically, print returns the same type as the expression.
# However, compilation in Mono throws an exception, and even in
# LSO, it throws a bounds check error when the result is a string
# or key or list and the returned value is used.
return nr(nt='PRINT', t=expr.t, ch=[expr])
if tok0 != 'IDENT':
if tok0 == 'EOF':
raise EParseUEOF(self)
raise EParseSyntax(self)
name = val
savepos = self.errorpos
self.NextToken()
# Course of action decided here.
tok0 = self.tok[0]
if tok0 == '(':
# Function call
self.NextToken()
# Functions are looked up in the global scope only.
sym = self.FindSymbolFull(val, globalonly=True)
if sym is None:
self.errorpos = savepos
raise EParseUndefined(self)
if sym['Kind'] != 'f':
self.errorpos = savepos
raise EParseUndefined(self)
args = self.Parse_optional_expression_list(sym['ParamTypes'])
self.expect(')')
self.NextToken()
return nr(nt='FNCALL', t=sym['Type'], name=name, ch=args)
sym = self.FindSymbolFull(val)
if sym is None or sym['Kind'] != 'v':
self.errorpos = savepos
raise EParseUndefined(self)
typ = sym['Type']
lvalue = nr(nt='IDENT', t=typ, name=name, scope=sym['Scope'])
# Lazy lists
if self.lazylists and tok0 == '[':
self.NextToken()
if typ != 'list':
raise EParseTypeMismatch(self)
idxexpr = self.Parse_optional_expression_list(False)
self.expect(']')
self.NextToken()
if self.tok[0] != '=' or not AllowAssignment:
return nr(nt='SUBIDX', t=None, ch=[lvalue] + idxexpr)
# Lazy list assignment
if len(idxexpr) != 1:
raise EParseFunctionMismatch(self)
if idxexpr[0].t != 'integer':
raise EParseTypeMismatch(self)
idxexpr = idxexpr[0]
self.NextToken()
saveAllowVoid = self.allowVoid
self.allowVoid = True
expr = self.Parse_expression()
self.allowVoid = saveAllowVoid
rtyp = expr.t
# Define aux function if it doesn't exist
# (leaves users room for writing their own replacement, e.g.
# one that uses something other than integer zero as filler)
if 'lazy_list_set' not in self.symtab[0]:
self.PushScope()
paramscope = self.scopeindex
self.PushScope()
blockscope = self.scopeindex
params = (['list', 'integer', 'list'],
['L', 'i', 'v'])
self.AddSymbol('f', 0, 'lazy_list_set', Loc=self.usedspots,
Type='list', ParamTypes=params[0], ParamNames=params[1],
Inline=False)
self.AddSymbol('v', paramscope, 'L', Type='list')
self.AddSymbol('v', paramscope, 'i', Type='integer')
self.AddSymbol('v', paramscope, 'v', Type='list')
#self.PushScope() # no locals
# Add body (apologies for the wall of text)
# Generated from this source:
'''
list lazy_list_set(list L, integer i, list v)
{
while (llGetListLength(L) < i)
L = L + 0;
return llListReplaceList(L, v, i, i);
}
'''
self.tree[self.usedspots] = nr(
nt='FNDEF'
, t='list'
, name='lazy_list_set'
, ptypes=params[0]
, pnames=params[1]
, scope=0
, pscope=paramscope
, ch=[
nr(nt='{}'
, t=None
, LIR=True
, scope=blockscope
, ch=[
nr(nt='WHILE'
, t=None
, ch=[
nr(nt='<'
, t='integer'
, ch=[
nr(nt='FNCALL'
, t='integer'
, name='llGetListLength'
, ch=[
nr(nt='IDENT'
, t='list'
, name='L'
, scope=paramscope
)
]
),
nr(nt='IDENT'
, t='integer'
, name='i'
, scope=paramscope
)
]
),
nr(nt='EXPR'
, t='list'
, ch=[
nr(nt='='
, t='list'
, ch=[
nr(nt='IDENT'
, t='list'
, name='L'
, scope=paramscope
),
nr(nt='+'
, t='list'
, ch=[
nr(nt='IDENT'
, t='list'
, name='L'
, scope=paramscope
),
nr(nt='CONST'
, t='integer'
, value=0
)
]
)
]
)
]
)
]
),
nr(nt='RETURN'
, t=None
, LIR=True
, ch=[
nr(nt='FNCALL'
, t='list'
, name='llListReplaceList'
, ch=[
nr(nt='IDENT'
, t='list'
, name='L'
, scope=paramscope
),
nr(nt='IDENT'
, t='list'
, name='v'
, scope=paramscope
),
nr(nt='IDENT'
, t='integer'
, name='i'
, scope=paramscope
),
nr(nt='IDENT'
, t='integer'
, name='i'
, scope=paramscope
)
]
)
]
)
]
)
]
)
self.usedspots += 1
self.PopScope()
self.PopScope()
if expr.t is None:
raise EParseTypeMismatch(self)
if expr.t != 'list':
expr = nr(nt='CAST', t='list', ch=[expr])
return nr(nt='=', t='list', ch=[lvalue, nr(
nt='FNCALL', t='list', name='lazy_list_set', scope=0,
ch=[lvalue.copy(), idxexpr, expr]
)])
if tok0 == '.':
self.NextToken()
self.expect('IDENT')
self.ValidateField(typ, self.tok[1])
lvalue = nr(nt='FLD', t='float', ch=[lvalue], fld=self.tok[1])
self.NextToken()
tok0 = self.tok[0]
typ = 'float'
if tok0 in ('++', '--'):
self.NextToken()
if lvalue.t not in ('integer', 'float'):
raise EParseTypeMismatch(self)
return nr(nt='V++' if tok0 == '++' else 'V--', t=lvalue.t,
ch=[lvalue])
if AllowAssignment and (tok0 in self.assignment_toks
or self.extendedassignment
and tok0 in self.extassignment_toks):
self.NextToken()
expr = self.Parse_expression()
rtyp = expr.t
if typ in ('integer', 'float'):
# LSL admits integer *= float (go figger).
# It acts like: lhs = (integer)((float)lhs * rhs)
# That would trigger an error without this check.
if tok0 != '*=' or typ == 'float':
expr = self.autocastcheck(expr, typ)
rtyp = typ
# Lots of drama for checking types. This is pretty much like
# addition, subtraction, multiply, divide, etc. all in one go.
if tok0 == '=':
expr = self.autocastcheck(expr, typ)
return nr(nt='=', t=typ, ch=[lvalue, expr])
if tok0 == '+=':
if typ == 'float':
expr = self.autocastcheck(expr, typ)
if rtyp != typ != 'list' or typ == rtyp == 'key':
# key + key is the only disallowed combo of equal types
raise EParseTypeMismatch(self)
if self.explicitcast:
if typ == 'list' != rtyp:
expr = nr(nt='CAST', t=typ, ch=[expr])
return nr(nt=tok0, t=typ, ch=[lvalue, expr])
if tok0 == '-=':
if typ == rtyp in ('integer', 'float', 'vector', 'rotation'):
return nr(nt=tok0, t=typ, ch=[lvalue, expr])
raise EParseTypeMismatch(self)
if tok0 in ('*=', '/='):
# There is a special case that was dealt with before.
if tok0 == '*=' and typ == 'integer' and rtyp == 'float':
return nr(nt=tok0, t=typ, ch=[lvalue, expr])
if (typ == rtyp or typ == 'vector') and rtyp in ('integer', 'float', 'rotation'):
if typ == 'vector' and rtyp == 'integer':
expr = self.autocastcheck(expr, 'float')
return nr(nt=tok0, t=typ, ch=[lvalue, expr])
raise EParseTypeMismatch(self)
if tok0 == '%=':
if typ == rtyp in ('integer', 'vector'):
return nr(nt=tok0, t=typ, ch=[lvalue, expr])
raise EParseTypeMismatch(self)
# Rest take integer operands only
if typ == rtyp == 'integer':
return nr(nt=tok0, t=typ, ch=[lvalue, expr])
raise EParseTypeMismatch(self)
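# No assignment or ++/-- follows; the lvalue itself is the whole
# postfix expression.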
return lvalue
def Parse_unary_expression(self, AllowAssignment = True):
"""Grammar parsed here:
unary_expression: '-' factor | '!' unary_expression | '~' unary_expression
# we expand lvalue here to facilitate parsing
| '++' IDENT | '++' IDENT '.' IDENT
| '--' IDENT | '--' IDENT '.' IDENT
| '(' TYPE ')' typecast_expression | '(' expression ')'
| unary_postfix_expression
%NORMAL RULES ONLY:
typecast_expression: '(' expression ')'
| unary_postfix_expression %except assignment
%EXTENDED RULES ONLY:
typecast_expression: unary_expression %except assignment
"""
tok0 = self.tok[0]
if tok0 == '-':
# Unary minus
self.NextToken()
value = self.Parse_factor()
if value.t not in ('integer', 'float', 'vector', 'rotation'):
raise EParseTypeMismatch(self)
return nr(nt='NEG', t=value.t, ch=[value])
if tok0 in ('!', '~'):
# Unary logic and bitwise NOT - applies to integers only
self.NextToken()
value = self.Parse_unary_expression()
if value.t != 'integer':
raise EParseTypeMismatch(self)
return nr(nt=tok0, t='integer', ch=[value])
if tok0 in ('++', '--'):
# Pre-increment / pre-decrement
self.NextToken()
self.expect('IDENT')
name = self.tok[1]
sym = self.FindSymbolFull(name)
if sym is None or sym['Kind'] != 'v':
# Pretend it doesn't exist
raise EParseUndefined(self)
typ = sym['Type']
ret = nr(nt='IDENT', t=typ, name=name, scope=sym['Scope'])
self.NextToken()
if self.tok[0] == '.':
self.NextToken()
self.expect('IDENT')
self.ValidateField(typ, self.tok[1])
ret = nr(nt='FLD', t='float', ch=[ret], fld=self.tok[1])
self.NextToken()
typ = ret.t
if typ not in ('integer', 'float'):
raise EParseTypeMismatch(self)
return nr(nt='++V' if tok0 == '++' else '--V', t=typ, ch=[ret])
if tok0 == '(':
# Parenthesized expression or typecast
self.NextToken()
if self.tok[0] != 'TYPE':
# Parenthesized expression
expr = self.Parse_expression()
self.expect(')')
self.NextToken()
return expr
# Typecast
typ = self.tok[1]
self.NextToken()
self.expect(')')
self.NextToken()
if self.extendedtypecast:
# Allow any unary expression (except assignment). The type cast
# acts as a prefix operator.
# Deal with the case of minus a constant integer or float.
# E.g. ~(integer)-2*3 should be parsed as (~(integer)-2)*3
# and not as ~(integer)(-(2*3))
# Note ~(integer)-a*3 is also parsed as ~(integer)(-a)*3
# which is bordering a violation of the POLA because of the
# priority of - with respect to *. But the syntax is quite
# explicit: what is typecast is always a unary expression,
# therefore processed first.
if self.tok[0] == '-':
self.NextToken()
if self.tok[0] == 'INTEGER_VALUE':
expr = nr(nt='CONST', t='integer',
value=lslfuncs.neg(self.tok[1]))
self.NextToken()
elif self.tok[0] == 'FLOAT_VALUE':
expr = nr(nt='CONST', t='float',
value=lslfuncs.neg(self.tok[1]))
self.NextToken()
else:
expr = self.Parse_unary_expression(AllowAssignment = False)
expr = nr(nt='NEG', t=expr.t, ch=[expr])
else:
expr = self.Parse_unary_expression(AllowAssignment = False)
else:
if self.tok[0] == '(':
self.NextToken()
expr = self.Parse_expression()
self.expect(')')
self.NextToken()
else:
expr = self.Parse_unary_postfix_expression(AllowAssignment = False)
basetype = expr.t
if self.lazylists and basetype is None and expr.nt == 'SUBIDX':
if typ not in self.TypeToExtractionFunction:
raise EParseNoConversion(self)
fn = self.TypeToExtractionFunction[typ]
sym = self.FindSymbolFull(fn, globalonly=True)
assert sym is not None
fnparamtypes = sym['ParamTypes']
subparamtypes = [x.t for x in expr.ch]
if fnparamtypes != subparamtypes:
raise EParseFunctionMismatch(self)
return nr(nt='FNCALL', t=sym['Type'], name=fn, scope=0,
ch=expr.ch)
if typ == 'list' and basetype in types \
or basetype in ('integer', 'float') and typ in ('integer', 'float', 'string') \
or basetype == 'string' and typ in types \
or basetype == 'key' and typ in ('string', 'key') \
or basetype == 'vector' and typ in ('string', 'vector') \
or basetype == 'rotation' and typ in ('string', 'rotation') \
or basetype == 'list' and typ == 'string':
return nr(nt='CAST', t=typ, ch=[expr])
raise EParseTypeMismatch(self)
# Must be a postfix expression.
return self.Parse_unary_postfix_expression(AllowAssignment)
def Parse_factor(self):
"""Grammar parsed here:
factor: unary_expression | factor '*' unary_expression
| factor '/' unary_expression | factor '%' unary_expression
"""
factor = self.Parse_unary_expression()
while self.tok[0] in ('*', '/', '%'):
op = self.tok[0]
ltype = factor.t
# Acceptable types for LHS
if op in ('*', '/') and ltype not in ('integer', 'float',
'vector', 'rotation') \
or op == '%' and ltype not in ('integer', 'vector'):
raise EParseTypeMismatch(self)
self.NextToken()
rexpr = self.Parse_unary_expression()
rtype = rexpr.t
# Mod is easier to check for
if op == '%' and ltype != rtype:
raise EParseTypeMismatch(self)
if op == '%' or ltype == rtype == 'integer':
# Deal with the special cases first (it's easy)
factor = nr(nt=op, t=ltype, ch=[factor, rexpr])
else:
# Any integer must be promoted to float now
if ltype == 'integer':
ltype = 'float'
factor = self.autocastcheck(factor, ltype)
if rtype == 'integer':
rtype = 'float'
rexpr = self.autocastcheck(rexpr, rtype)
if ltype == 'float' and rtype in ('float', 'vector') \
or ltype == 'vector' and rtype in ('float', 'vector', 'rotation') \
or ltype == rtype == 'rotation':
if op == '/' and rtype == 'vector':
# Division by vector isn't valid
raise EParseTypeMismatch(self)
# The rest are valid
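# Result types: float * vector and vector * float give a vector
# (scaling); vector * vector gives a float (LSL defines '*' on two
# vectors as the dot product); everything else keeps the left
# operand's type.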
if ltype == 'float' and rtype == 'vector':
resulttype = rtype
elif ltype == rtype == 'vector':
resulttype = 'float'
else:
resulttype = ltype
factor = nr(nt=op, t=resulttype, ch=[factor, rexpr])
else:
raise EParseTypeMismatch(self)
return factor
def Parse_term(self):
"""Grammar parsed here:
term: factor | term '+' factor | term '-' factor
"""
term = self.Parse_factor()
while self.tok[0] in ('+', '-'):
op = self.tok[0]
ltype = term.t
if op == '+' and ltype not in types \
or op == '-' and ltype not in ('integer', 'float',
'vector', 'rotation'):
raise EParseTypeMismatch(self)
self.NextToken()
rexpr = self.Parse_factor()
rtype = rexpr.t
# This is necessary, but the reason is subtle.
# The types must match in principle (except integer/float), so it
# doesn't seem necessary to check rtype. But there's the case
# where the first element is a list, where the types don't need to
# match but the second type must make sense.
if op == '+' and rtype not in types:
#or op == '-' and rtype not in ('integer', 'float',
# 'vector', 'rotation'):
raise EParseTypeMismatch(self)
# Isolate the additions where the types match to make our life easier later
if op == '+' and (ltype == rtype or ltype == 'list' or rtype == 'list'):
if ltype == rtype == 'key':
# key + key is the only disallowed combo of equals
raise EParseTypeMismatch(self)
# Note that although list + nonlist is semantically the
# same as list + (list)nonlist, and similarly for
# nonlist + list, they don't compile to the same thing,
# so we don't act on self.explicitcast in this case.
if rtype == 'list':
ltype = rtype
term = nr(nt=op, t=ltype, ch=[term, rexpr])
elif self.allowkeyconcat and op == '+' \
and ltype in ('key', 'string') and rtype in ('key', 'string'):
# Allow string+key addition (but add explicit cast)
if ltype == 'key':
term = nr(nt=op, t=rtype,
ch=[nr(nt='CAST', t=rtype, ch=[term]), rexpr])
else:
term = nr(nt=op, t=ltype,
ch=[term, nr(nt='CAST', t=ltype, ch=[rexpr])])
elif ltype == 'key' or rtype == 'key':
# Only list + key or key + list is allowed; otherwise keys can't
# be added to or subtracted from anything.
raise EParseTypeMismatch(self)
else:
if ltype == 'float':
# Promote rexpr to float
term = nr(nt=op, t=ltype,
ch=[term, self.autocastcheck(rexpr, ltype)])
else:
# Convert LHS to rtype if possible (note no keys get here)
term = nr(nt=op, t=rtype,
ch=[self.autocastcheck(term, rtype), rexpr])
return term
def Parse_shift(self):
"""Grammar parsed here:
shift: term | shift '<<' term | shift '>>' term
"""
shift = self.Parse_term()
while self.tok[0] in ('<<', '>>'):
if shift.t != 'integer':
raise EParseTypeMismatch(self)
op = self.tok[0]
self.NextToken()
rexpr = self.Parse_term()
if rexpr.t != 'integer':
raise EParseTypeMismatch(self)
shift = nr(nt=op, t='integer', ch=[shift , rexpr])
return shift
def Parse_inequality(self):
"""Grammar parsed here:
inequality: shift | inequality '<' shift | inequality '<=' shift
| inequality '>' shift | inequality '>=' shift
"""
inequality = self.Parse_shift()
while self.tok[0] in ('<', '<=', '>', '>='):
op = self.tok[0]
ltype = inequality.t
if ltype not in ('integer', 'float'):
raise EParseTypeMismatch(self)
self.NextToken()
rexpr = self.Parse_shift()
rtype = rexpr.t
if rtype not in ('integer', 'float'):
raise EParseTypeMismatch(self)
if ltype != rtype:
if rtype == 'float':
inequality = self.autocastcheck(inequality, rtype)
else:
rexpr = self.autocastcheck(rexpr, ltype)
inequality = nr(nt=op, t='integer', ch=[inequality, rexpr])
return inequality
def Parse_comparison(self):
"""Grammar parsed here:
comparison: inequality | comparison '==' inequality
| comparison '!=' inequality
"""
comparison = self.Parse_inequality()
while self.tok[0] in ('==', '!='):
op = self.tok[0]
ltype = comparison.t
if ltype not in types:
raise EParseTypeMismatch(self)
self.NextToken()
rexpr = self.Parse_inequality()
rtype = rexpr.t
if ltype == 'float':
rexpr = self.autocastcheck(rexpr, ltype)
else:
# For string & key, RHS (rtype) mandates the conversion
# (that's room for optimization: always compare strings)
comparison = self.autocastcheck(comparison, rtype)
comparison = nr(nt=op, t='integer', ch=[comparison, rexpr])
return comparison
def Parse_bitbool_factor(self):
"""Grammar parsed here:
bitbool_factor: comparison | bitbool_factor '&' comparison
"""
bitbool_factor = self.Parse_comparison()
while self.tok[0] == '&':
if bitbool_factor.t != 'integer':
raise EParseTypeMismatch(self)
op = self.tok[0]
self.NextToken()
rexpr = self.Parse_comparison()
if rexpr.t != 'integer':
raise EParseTypeMismatch(self)
bitbool_factor = nr(nt=op, t='integer', ch=[bitbool_factor, rexpr])
return bitbool_factor
def Parse_bitxor_term(self):
"""Grammar parsed here:
bitxor_term: bitbool_factor | bitxor_term '^' bitbool_factor
"""
bitxor_term = self.Parse_bitbool_factor()
while self.tok[0] == '^':
if bitxor_term.t != 'integer':
raise EParseTypeMismatch(self)
op = self.tok[0]
self.NextToken()
rexpr = self.Parse_bitbool_factor()
if rexpr.t != 'integer':
raise EParseTypeMismatch(self)
bitxor_term = nr(nt=op, t='integer', ch=[bitxor_term, rexpr])
return bitxor_term
def Parse_bitbool_term(self):
"""Grammar parsed here:
bitbool_term: bitxor_term | bitbool_term '|' bitxor_term
"""
bitbool_term = self.Parse_bitxor_term()
while self.tok[0] == '|':
if bitbool_term.t != 'integer':
raise EParseTypeMismatch(self)
op = self.tok[0]
self.NextToken()
rexpr = self.Parse_bitxor_term()
if rexpr.t != 'integer':
raise EParseTypeMismatch(self)
bitbool_term = nr(nt=op, t='integer', ch=[bitbool_term, rexpr])
return bitbool_term
def Parse_expression(self):
"""Grammar parsed here:
expression: bitbool_term | expression '||' bitbool_term
| expression '&&' bitbool_term
Most operators with same priority, in general, are executed in
right-to-left order but calculated with precedence left-to-right.
That is, the tree is generated LTR but traversed RTL (in post-order).
E.g. a-b+c is calculated (in RPN notation) as: c, b, a, swap, -, +
i.e. c is evaluated first and a last, but the operation is still (a-b)+c
which is normal LTR.
At this point we're just constructing the tree, so we follow normal
precedence rules.
"""
expression = self.Parse_bitbool_term()
while self.tok[0] in ('&&', '||'):
if expression.t != 'integer':
raise EParseTypeMismatch(self)
op = self.tok[0]
self.NextToken()
rexpr = self.Parse_bitbool_term()
if rexpr.t != 'integer':
raise EParseTypeMismatch(self)
expression = nr(nt=op, t='integer', ch=[expression, rexpr])
if not self.allowVoid and expression.t not in types:
raise EParseTypeMismatch(self)
return expression
def Parse_optional_expression_list(self, expected_types = None):
"""Grammar parsed here:
optional_expression_list: LAMBDA | expression_list
expression_list: expression | expression_list ',' expression
"""
# Recursive descent parsers are nice, but not exempt from problems.
# We need to accept empty lists. We get out of this maze with a dirty
# hack: rather than attempt to parse an expression and backtrack in
# case of error, we check the next token to see whether it is one
# that closes the expression list.
# optional_expression_list is used by FOR loops (closed by ';' or ')'),
# list constants and lazy lists (closed by ']') and function arguments
# (closed by ')'). If it's not the right token, we'll err anyway upon
# return.
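# Example: for an empty list literal "[]" or a call with no arguments,
# the token right after '[' or '(' is already ']' or ')', so we return
# an empty list without attempting to parse an expression.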
ret = []
idx = 0
if self.tok[0] not in (']', ')', ';'):
while True:
saveAllowVoid = self.allowVoid
self.allowVoid = True
expr = self.Parse_expression()
self.allowVoid = saveAllowVoid
if expr.nt == 'SUBIDX' and expr.t is None:
# Don't accept an untyped lazy list in expression lists
raise EParseTypeMismatch(self)
if False is not expected_types is not None:
if idx >= len(expected_types):
raise EParseFunctionMismatch(self)
try:
expr = self.autocastcheck(expr, expected_types[idx])
except EParseTypeMismatch:
raise EParseFunctionMismatch(self)
elif expected_types is False and self.optenabled:
# don't accept void expressions if optimization is on
if expr.t not in types:
raise EParseTypeMismatch(self)
idx += 1
ret.append(expr)
if self.tok[0] != ',':
break
self.NextToken()
if False is not expected_types is not None and idx != len(expected_types):
raise EParseFunctionMismatch(self)
return ret
def Parse_statement(self, ReturnType, AllowDecl = False, AllowStSw = False,
InsideSwitch = False, InsideLoop = False):
"""Grammar parsed here:
statement: ';' | single_statement | code_block
single_statement: if_statement | while_statement | do_statement
| for_statement | jump_statement | state_statement | label_statement
| return_statement | declaration_statement | expression ';'
| switch_statement %if enableswitch
| case_statement %if enableswitch and InsideSwitch
| break_statement %if enableswitch and InsideSwitch or breakcont and InsideLoop
| continue_statement %if breakcont and InsideLoop
if_statement: IF '(' expression ')' statement ELSE statement
| IF '(' expression ')' statement
while_statement: WHILE '(' expression ')' statement
do_statement: DO statement WHILE '(' expression ')' ';'
for_statement: FOR '(' optional_expression_list ';' expression ';'
optional_expression_list ')' statement
jump_statement: JUMP IDENT ';'
state_statement: STATE DEFAULT ';' | STATE IDENT ';'
label_statement: '@' IDENT ';'
return_statement: RETURN ';' | RETURN expression ';'
declaration_statement: TYPE lvalue ';' | TYPE lvalue '=' expression ';'
switch_statement: SWITCH '(' expression ')' code_block
case_statement: CASE expression ':' | CASE expression code_block
| DEFAULT ':' | DEFAULT code_block
break_statement: BREAK ';'
continue_statement: CONTINUE ';'
There's a restriction: a *single* statement cannot be a declaration.
For example: if (1) integer x; is not allowed.
Note that SWITCH expects a code block because CASE is a full statement
for us, rather than a label. So for example this wouldn't work:
switch (expr) case expr: stmt; // works in C but not in this processor
but this works in both: switch (expr) { case expr: stmt; }
"""
tok0 = self.tok[0]
if tok0 == '{':
return self.Parse_code_block(ReturnType, AllowStSw = AllowStSw,
InsideSwitch = InsideSwitch, InsideLoop = InsideLoop)
if tok0 == ';':
self.NextToken()
return nr(nt=';', t=None)
if tok0 == '@':
if not AllowDecl and self.optenabled:
raise EParseInvalidLabelOpt(self)
self.NextToken()
self.expect('IDENT')
name = self.tok[1]
if name in self.symtab[self.scopeindex]:
raise EParseAlreadyDefined(self)
# shrinknames *needs* all labels renamed, so they are out of the way
if self.duplabels or self.shrinknames:
# Duplicate labels allowed.
if name in self.locallabels or self.shrinknames:
# Generate a new unique name and attach it to the symbol.
unique = self.GenerateLabel()
self.AddSymbol('l', self.scopeindex, name, NewName=unique,
ref=0)
else:
# Use the existing name. Faster and more readable.
unique = name
self.locallabels.add(name)
self.AddSymbol('l', self.scopeindex, name, ref=0)
else:
# Duplicate labels disallowed.
# All labels go to a common pool local to the current function.
# Check if it's already there, and add it otherwise.
if name in self.locallabels:
raise EParseDuplicateLabel(self)
self.locallabels.add(name)
self.AddSymbol('l', self.scopeindex, name, ref=0)
self.NextToken()
self.expect(';')
self.NextToken()
return nr(nt='@', t=None, name=name, scope=self.scopeindex)
if tok0 == 'JUMP':
self.NextToken()
self.expect('IDENT')
name = self.tok[1]
sym = self.FindSymbolPartial(name, MustBeLabel=True)
jumpnode = nr(nt='JUMP', t=None, name=name, scope=None)
if not sym or sym['Kind'] != 'l':
# It might still be a forward reference, so we add it to the
# list of things to look up when done
self.jump_lookups.append((name, self.scopestack[:],
self.errorpos, jumpnode))
else:
jumpnode.scope = sym['Scope']
sym['ref'] += 1
self.NextToken()
self.expect(';')
self.NextToken()
return jumpnode
if tok0 == 'STATE':
if self.localevents is None:
if AllowStSw is False:
raise EParseCantChangeState(self)
if AllowStSw is None:
self.PruneBug.append((self.errorpos,
EParseCantChangeState))
self.NextToken()
if self.tok[0] not in ('DEFAULT', 'IDENT'):
raise EParseSyntax(self)
# State Switch only searches for states in the global scope
name = self.tok[1] if self.tok[0] == 'IDENT' else 'default'
if name not in self.symtab[0] and (name not in self.globals
or self.globals[name]['Kind'] != 's'):
raise EParseUndefined(self)
self.NextToken()
self.expect(';')
self.NextToken()
return nr(nt='STSW', t=None, name=name, scope=0)
if tok0 == 'RETURN':
savepos = self.errorpos
self.NextToken()
if self.tok[0] == ';':
value = None
else:
savepos = self.errorpos
saveAllowVoid = self.allowVoid
# Needed due to another LSL bug, see regr/void-in-return.lsl
self.allowVoid = True
value = self.Parse_expression()
self.allowVoid = saveAllowVoid
self.expect(';')
self.NextToken()
if ReturnType is None and value is not None:
# It follows the same rules as AllowStSw
if AllowStSw is False:
self.errorpos = savepos
raise EParseReturnShouldBeEmpty(self)
elif value.t is None:
if AllowStSw is None:
self.PruneBug.append((self.errorpos,
EParseReturnShouldBeEmpty))
self.PushScope()
scope = self.scopeindex
self.PopScope()
return nr(nt='{}', t=None, scope=scope,
ch=[nr(nt='EXPR', t=None, ch=[value]),
nr(nt='RETURN', t=None)])
else:
self.errorpos = savepos
raise EParseTypeMismatch(self)
if ReturnType is not None and value is None:
self.errorpos = savepos
raise EParseReturnIsEmpty(self)
if value is None:
return nr(nt='RETURN', t=None)
# Sets LastIsReturn flag too
return nr(nt='RETURN', t=None, LIR=True,
ch=[self.autocastcheck(value, ReturnType)])
if tok0 == 'IF':
ret = nr(nt='IF', t=None, ch=[])
self.NextToken()
self.expect('(')
self.NextToken()
ret.ch.append(self.Parse_expression())
self.expect(')')
self.NextToken()
savePruneBug = self.PruneBug
self.PruneBug = []
ret.ch.append(self.Parse_statement(ReturnType, AllowStSw = None, InsideLoop = InsideLoop))
if self.tok[0] == 'ELSE':
if AllowStSw is False and self.PruneBug:
self.errorpos = self.PruneBug[0][0]
raise self.PruneBug[0][1](self)
LastIsReturn = getattr(ret.ch[1], 'LIR', False)
self.NextToken()
ret.ch.append(self.Parse_statement(ReturnType,
AllowStSw = AllowStSw, InsideLoop = InsideLoop))
if AllowStSw is None:
savePruneBug += self.PruneBug
if LastIsReturn and getattr(ret.ch[2], 'LIR', False):
ret.LIR = True
self.PruneBug = savePruneBug
return ret
if tok0 == 'WHILE':
self.NextToken()
if self.breakcont:
# We may add braces - or not. The safe approach is to assume
# we always do and open a new scope for it. At worst it will be
# empty. At least it is not reflected as braces in the code if
# braces are not used.
#
# This is designed to deal with cases like:
# if (a) while (b) { ... break; }
#
# This works by adding braces around the while and the newly
# added label, like this:
# if (a) { while (b) { ... jump label; } @label; }
self.PushScope()
self.breakstack.append([self.GenerateLabel(), self.scopeindex,
0])
# Scope still unknown; if a block is opened, Parse_code_block()
# will fill it in.
self.continuestack.append([self.GenerateLabel(), None, 0])
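# Each entry pushed onto breakstack/continuestack is
# [label name, scope index, reference count]; the count tells us later
# whether the label actually needs to be emitted.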
self.expect('(')
self.NextToken()
condition = self.Parse_expression()
self.expect(')')
self.NextToken()
# To fix a problem with a corner case (LSL allows defining a label
# in a single statement, at the same scope as the loop, breaking
# some of our logic), we check if the statement is a label. If so,
# we pop the scope to parse the statement and push it again.
# It won't cause scope problems in turn because we won't add any
# break or continue labels if no break or continue statement is
# present, which it can't because the statement is a label.
if self.breakcont and self.tok[0] == '@':
self.PopScope()
stmt = self.Parse_statement(ReturnType, AllowStSw = True, InsideLoop = True)
self.PushScope()
else:
stmt = self.Parse_statement(ReturnType, AllowStSw = True, InsideLoop = True)
ret = nr(nt='WHILE', t=None, ch=[condition, stmt])
if self.breakcont:
last = self.continuestack.pop()
if last[2]:
assert ret.ch[1].nt == '{}'
ret.ch[1].ch.append(nr(nt='@', t=None, name=last[0],
scope=last[1]))
self.AddSymbol('l', last[1], last[0], ref=last[2])
last = self.breakstack.pop()
if last[2]:
assert last[1] is not None
ret = nr(nt='{}', t=None, scope=last[1], ch=[ret,
nr(nt='@', t=None, name=last[0], scope=last[1])])
self.AddSymbol('l', last[1], last[0], ref=last[2])
self.PopScope()
return ret
if tok0 == 'DO':
self.NextToken()
if self.breakcont:
self.PushScope()
self.breakstack.append([self.GenerateLabel(), self.scopeindex,
0])
# Scope still unknown; if a block is opened, Parse_code_block()
# will fill it in.
self.continuestack.append([self.GenerateLabel(), None, 0])
if self.breakcont and self.tok[0] == '@':
self.PopScope()
stmt = self.Parse_statement(ReturnType, AllowStSw = True,
InsideLoop = True)
self.PushScope()
else:
stmt = self.Parse_statement(ReturnType, AllowStSw = True,
InsideLoop = True)
self.expect('WHILE')
self.NextToken()
self.expect('(')
self.NextToken()
condition = self.Parse_expression()
self.expect(')')
self.NextToken()
self.expect(';')
self.NextToken()
ret = nr(nt='DO', t=None, ch=[stmt, condition])
if self.breakcont:
last = self.continuestack.pop()
if last[2]:
assert ret.ch[0].nt == '{}'
ret.ch[0].ch.append(nr(nt='@', t=None, name=last[0],
scope=last[1]))
self.AddSymbol('l', last[1], last[0], ref=last[2])
last = self.breakstack.pop()
if last[2]:
assert last[1] is not None
ret = nr(nt='{}', t=None, scope=last[1], ch=[ret,
nr(nt='@', t=None, name=last[0], scope=last[1])])
self.AddSymbol('l', last[1], last[0], ref=last[2])
self.PopScope()
return ret
if tok0 == 'FOR':
self.NextToken()
if self.breakcont:
self.PushScope()
self.breakstack.append([self.GenerateLabel(), self.scopeindex,
0])
# Scope still unknown; if a block is opened, Parse_code_block()
# will fill it in.
self.continuestack.append([self.GenerateLabel(), None, 0])
self.expect('(')
self.NextToken()
initializer = self.Parse_optional_expression_list()
self.expect(';')
self.NextToken()
condition = self.Parse_expression()
self.expect(';')
self.NextToken()
iterator = self.Parse_optional_expression_list()
self.expect(')')
self.NextToken()
if self.breakcont and self.tok[0] == '@':
self.PopScope()
stmt = self.Parse_statement(ReturnType, AllowStSw = True,
InsideLoop = True)
self.PushScope()
else:
stmt = self.Parse_statement(ReturnType, AllowStSw = True,
InsideLoop = True)
ret = nr(nt='FOR', t=None,
ch=[nr(nt='EXPRLIST', t=None, ch=initializer),
condition,
nr(nt='EXPRLIST', t=None, ch=iterator),
stmt
])
if self.breakcont:
last = self.continuestack.pop()
if last[2]:
assert ret.ch[3].nt == '{}'
ret.ch[3].ch.append(nr(nt='@', t=None, name=last[0],
scope=last[1]))
self.AddSymbol('l', last[1], last[0], ref=last[2])
last = self.breakstack.pop()
if last[2]:
assert last[1] is not None
ret = nr(nt='{}', t=None, scope=last[1], ch=[ret,
nr(nt='@', t=None, name=last[0], scope=last[1])])
self.AddSymbol('l', last[1], last[0], ref=last[2])
self.PopScope()
return ret
if tok0 == 'SWITCH':
self.NextToken()
self.expect('(')
self.NextToken()
expr = self.Parse_expression()
self.expect(')')
self.NextToken()
brk = self.GenerateLabel()
# Scope is determined in Parse_code_block()
self.breakstack.append([brk, None, 0])
blk = self.Parse_code_block(ReturnType, AllowStSw = AllowStSw,
InsideSwitch = True, InsideLoop = InsideLoop)
blkscope = self.breakstack[-1][1]
# Replace the block
# switch (expr1) { case expr2: stmts1; break; default: stmts2; }
# is translated to:
# {
# if (expr1==expr2) jump label1;
# jump labeldef;
#
# @label1;
# stmts1;
# jump labelbrk;
# @labeldef;
# stmts2;
# @labelbrk;
# }
# The prelude is the ifs and the jumps.
# The block gets the cases replaced with labels,
# and the breaks replaced with jumps.
switchcaselist = []
switchcasedefault = None
# Since label scope rules prevent us from being able to jump inside
# a nested block, only one nesting level is considered.
assert blk.nt == '{}'
blk = blk.ch # Disregard the '{}' - we'll add it back later
for idx in xrange(len(blk)):
if blk[idx].nt == 'CASE':
lbl = self.GenerateLabel()
switchcaselist.append((lbl, blk[idx].ch[0]))
self.AddSymbol('l', blkscope, lbl, ref=0)
blk[idx] = nr(nt='@', t=None, name=lbl, scope=blkscope)
elif blk[idx].nt == 'DEFAULTCASE':
if switchcasedefault is not None:
raise EParseManyDefaults(self)
lbl = self.GenerateLabel()
switchcasedefault = lbl
self.AddSymbol('l', blkscope, lbl, ref=0)
blk[idx] = nr(nt='@', name=lbl, scope=blkscope)
prelude = []
ltype = expr.t
for case in switchcaselist:
rexpr = case[1]
lexpr = expr
if ltype == 'float':
rexpr = self.autocastcheck(rexpr, ltype)
else:
# For string & key, RHS (rtype) mandates the conversion
# (that's room for optimization: always compare strings)
lexpr = self.autocastcheck(lexpr, rexpr.t)
prelude.append(nr(nt='IF', t=None, ch=[
nr(nt='==', t='integer', ch=[lexpr, rexpr]),
nr(nt='JUMP', t=None, name=case[0], scope=blkscope)
]))
self.symtab[blkscope][case[0]]['ref'] += 1
if switchcasedefault is None:
if self.errmissingdefault:
raise EParseMissingDefault(self)
# Check if it's worth adding a break label. If there's no
# executable code, there's no point. However, this check is
# insufficient. It misses SEF expressions. For that reason,
# this is best left up to a later optimizer that knows about
# SEF. But we do a preliminary elimination here.
if self.does_something(blk):
switchcasedefault = brk
else:
# Check if no code up to the default label does anything.
# If so, remove the label and don't generate the jump.
for i in xrange(len(blk)):
node = blk[i]
if (node.nt == '@' and node.name == switchcasedefault
and node.scope == blkscope):
switchcasedefault = None
del blk[i]
break
if self.does_something([node]):
break
del i, node
if switchcasedefault is not None:
prelude.append(nr(nt='JUMP', t=None, name=switchcasedefault,
scope=blkscope))
if switchcasedefault == brk:
# add a reference to it in the break stack
self.breakstack[-1][2] += 1
else:
self.symtab[blkscope][switchcasedefault]['ref'] += 1
last = self.breakstack.pop()
if last[2]:
blk.append(nr(nt='@', name=brk, scope=blkscope))
self.AddSymbol('l', blkscope, brk, ref=last[2])
return nr(nt='{}', t=None, scope=blkscope, ch=prelude + blk)
if tok0 == 'CASE':
if not InsideSwitch:
raise EParseInvalidCase(self, u"case")
if self.scopeindex != self.breakstack[-1][1]:
# If this block is nested and not the main switch block, this
# won't work. LSL label scope rules don't expose the nested
# labels. Nothing we can do about that.
raise EParseCaseNotAllowed(self, u"case")
self.NextToken()
expr = self.Parse_expression()
if self.tok[0] == ':':
self.NextToken()
elif self.tok[0] != '{':
raise EParseSyntax(self)
return nr(nt='CASE', t=None, ch=[expr])
if tok0 == 'DEFAULT':
if self.enableswitch:
if not InsideSwitch:
raise EParseInvalidCase(self, u"default")
if self.scopeindex != self.breakstack[-1][1]:
# If this block is nested and not the main switch block, this
# won't work. Label scope rules don't expose the nested
# labels. Nothing we can do about that.
raise EParseCaseNotAllowed(self, u"default")
self.NextToken()
if self.tok[0] == ':':
self.NextToken()
elif self.tok[0] != '{':
raise EParseSyntax(self)
return nr(nt='DEFAULTCASE', t=None)
# else fall through to eventually fail
if tok0 == 'BREAK':
if not self.breakstack:
raise EParseInvalidBreak(self)
self.NextToken()
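# An optional integer argument, e.g. "break 2;", selects how many
# nested loops/switches to leave; it is stored as a negative index
# into the break stack.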
n = -1
if self.tok[0] == 'INTEGER_VALUE':
if self.tok[1] <= 0:
raise EParseInvalidBrkContArg(self)
n = -self.tok[1]
self.NextToken()
self.expect(';')
self.NextToken()
try:
self.breakstack[n][2] += 1
except IndexError:
raise EParseInvalidBrkContArg(self)
return nr(nt='JUMP', t=None, name=self.breakstack[n][0],
scope=self.breakstack[n][1])
if tok0 == 'CONTINUE':
if not self.continuestack:
raise EParseInvalidCont(self)
self.NextToken()
n = -1
if self.tok[0] == 'INTEGER_VALUE':
if self.tok[1] <= 0:
raise EParseInvalidBrkContArg(self)
n = -self.tok[1]
self.NextToken()
self.expect(';')
self.NextToken()
if n == -1 and self.continuestack[-1][1] is None:
# We're not inside a block - 'continue' is essentially a nop
# e.g. while (cond) continue; is the same as while (cond) ;
return nr(nt=';', t=None)
try:
if self.continuestack[n][1] is None:
# this can happen with e.g.:
# while (cond) while (cond) while (cond) continue 3;
# Transform to while(cond) while(cond) while(cond) break 2;
# which is equivalent since there are no {}.
n += 1 # e.g. -3 -> -2
self.breakstack[n][2] += 1 # add a reference to the break
return nr(nt='JUMP', t=None, name=self.breakstack[n][0],
scope=self.breakstack[n][1])
except IndexError:
raise EParseInvalidBrkContArg(self)
self.continuestack[n][2] += 1
return nr(nt='JUMP', t=None, name=self.continuestack[n][0],
scope=self.continuestack[n][1])
if tok0 == 'TYPE':
if not AllowDecl:
raise EParseDeclarationScope(self)
typ = self.tok[1]
self.NextToken()
self.expect('IDENT')
name = self.tok[1]
if name in self.symtab[self.scopeindex]:
raise EParseAlreadyDefined(self)
self.NextToken()
value = None
decl = nr(nt='DECL', t=typ, name=name, scope=self.scopeindex)
if self.tok[0] == '=':
self.NextToken()
decl.ch = [self.autocastcheck(self.Parse_expression(), typ)]
self.expect(';')
self.NextToken()
self.AddSymbol('v', self.scopeindex, name, Type=typ)
return decl
# If none of the above, it must be an expression.
saveAllowVoid = self.allowVoid
self.allowVoid = True
value = self.Parse_expression()
self.allowVoid = saveAllowVoid
self.expect(';')
self.NextToken()
return nr(nt='EXPR', t=value.t, ch=[value])
def Parse_code_block(self, ReturnType, AllowStSw = False, InsideSwitch = False,
InsideLoop = False):
"""Grammar parsed here:
code_block: '{' statements '}'
statements: LAMBDA | statements statement
It receives the return type to expect for return statements.
"""
self.expect('{')
self.NextToken()
self.PushScope()
# Kludge to find the scope of the break (for switch) /
# continue (for loops) labels.
if self.breakstack: # non-empty iff inside loop or switch
if InsideSwitch and self.breakstack[-1][1] is None:
self.breakstack[-1][1] = self.scopeindex
if InsideLoop and self.continuestack[-1][1] is None:
self.continuestack[-1][1] = self.scopeindex
body = []
LastIsReturn = False
while True:
if self.tok[0] == '}':
self.closebrace = self.errorpos
break
stmt = self.Parse_statement(ReturnType, AllowDecl = True,
AllowStSw = AllowStSw, InsideSwitch = InsideSwitch,
InsideLoop = InsideLoop)
LastIsReturn = getattr(stmt, 'LIR', False)
body.append(stmt)
scope_braces = self.scopeindex
self.PopScope()
self.expect('}')
self.NextToken()
node = nr(nt='{}', t=None, scope=scope_braces, ch=body)
if LastIsReturn:
node.LIR = True
return node
def Parse_simple_expr(self, ForbidList=False):
"""Grammar parsed here:
simple_expr: simple_expr_except_list | list_simple_expr
simple_expr_except_list: STRING_VALUE | KEY_VALUE | VECTOR_VALUE
| ROTATION_VALUE | TRUE | FALSE | number_value
| '<' simple_expr ',' simple_expr ',' simple_expr '>'
| '<' simple_expr ',' simple_expr ',' simple_expr ',' simple_expr '>'
number_value: FLOAT_VALUE | INTEGER_VALUE | '-' FLOAT_VALUE | '-' INTEGER_VALUE
list_simple_expr: '[' ']' | '[' list_simple_expr_items ']'
list_simple_expr_items: simple_expr_except_list
| list_simple_expr_items ',' simple_expr_except_list
"""
tok = self.tok
self.NextToken()
if tok[0] in ('TRUE', 'FALSE'): # TRUE and FALSE don't admit sign in globals
return nr(nt='CONST', t='integer', value=int(tok[0]=='TRUE'))
if tok[0] in ('STRING_VALUE', 'KEY_VALUE', 'VECTOR_VALUE', 'ROTATION_VALUE', 'LIST_VALUE'):
val = tok[1]
if tok[0] == 'STRING_VALUE' and self.allowmultistrings:
while self.tok[0] == 'STRING_VALUE':
val += self.tok[1]
self.NextToken()
return nr(nt='CONST', t=lslcommon.PythonType2LSL[type(val)],
value=val)
if tok[0] == 'IDENT':
sym = self.FindSymbolPartial(tok[1])
# The parser accepts library function names here as valid variables
# (it chokes at RAIL in Mono, and at runtime in LSO for some types)
if sym is None or sym['Kind'] != 'v' and (sym['Kind'] != 'f'
or 'ParamNames' in sym): # only UDFs have ParamNames
raise EParseUndefined(self)
typ = sym['Type']
if ForbidList and lslcommon.LSO and typ == 'key':
# This attempts to reproduce LSO's behaviour that a key global
# var inside a list global definition takes a string value
# (SCR-295).
typ = 'string'
return nr(nt='IDENT', t=typ, name=tok[1],
scope=sym['Scope'] if sym['Kind'] == 'v' else 0)
if tok[0] == '<':
value = [self.Parse_simple_expr()]
self.autocastcheck(value[0], 'float')
self.expect(',')
self.NextToken()
value.append(self.Parse_simple_expr())
self.autocastcheck(value[1], 'float')
self.expect(',')
self.NextToken()
value.append(self.Parse_simple_expr())
self.autocastcheck(value[2], 'float')
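# Three components closed by '>' form a vector; a fourth component
# makes it a rotation (quaternion).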
if self.tok[0] == '>':
self.NextToken()
return nr(nt='VECTOR', t='vector', ch=value)
self.expect(',')
self.NextToken()
value.append(self.Parse_simple_expr())
self.autocastcheck(value[3], 'float')
self.expect('>')
self.NextToken()
return nr(nt='ROTATION', t='rotation', ch=value)
if tok[0] == '[' and not ForbidList:
value = []
if self.tok[0] == ']':
self.NextToken()
return nr(nt='LIST', t='list', ch=value)
while True:
value.append(self.Parse_simple_expr(ForbidList=True))
if self.tok[0] == ']':
self.NextToken()
return nr(nt='LIST', t='list', ch=value)
self.expect(',')
self.NextToken()
# Integer or Float constant expected
neg = False
if tok[0] == '-':
neg = True
tok = self.tok
self.NextToken()
if tok[0] not in ('INTEGER_VALUE', 'FLOAT_VALUE'):
raise EParseSyntax(self)
value = tok[1]
if neg and (tok[0] != 'INTEGER_VALUE' or value != -2147483648):
value = -value
return nr(nt='CONST',
t='float' if tok[0] == 'FLOAT_VALUE' else 'integer', value=value)
def Parse_optional_param_list(self):
"""Grammar parsed here:
optional_param_list: LAMBDA | param_list
param_list: TYPE IDENT | param_list ',' TYPE IDENT
"""
types = []
names = []
if self.tok[0] == 'TYPE':
while True:
typ = self.tok[1]
self.NextToken()
self.expect('IDENT')
name = self.tok[1]
if name in self.symtab[self.scopeindex]:
raise EParseAlreadyDefined(self)
types.append(typ)
names.append(name)
self.AddSymbol('v', self.scopeindex, name, Type=typ, Param=True)
self.NextToken()
if self.tok[0] != ',':
break
self.NextToken()
self.expect('TYPE')
return (types, names)
def Parse_events(self):
"""Grammar parsed here:
events: event | events event
event: EVENT_NAME '(' optional_parameter_list ')' code_block
"""
self.expect('EVENT_NAME') # mandatory
ret = []
while self.tok[0] == 'EVENT_NAME':
name = self.tok[1]
self.NextToken()
if name in self.localevents:
raise EParseAlreadyDefined(self)
self.localevents.add(name)
self.expect('(')
self.NextToken()
# Function parameters go to a dedicated symbol table.
self.PushScope()
params = self.Parse_optional_param_list()
self.expect(')')
self.NextToken()
# NOTE: This check is a bit crude: the error is reported at the end of
# the parameter list rather than at the offending parameter. To do it
# properly, we could pass the expected parameter types into
# Parse_optional_param_list().
if tuple(params[0]) != self.events[name]['pt']:
raise EParseSyntax(self)
self.locallabels = set()
body = self.Parse_code_block(None)
del self.locallabels
ret.append(nr(nt='FNDEF', t=None, name=name, # no scope as these are reserved words
pscope=self.scopeindex, ptypes=params[0], pnames=params[1],
ch=[body]))
self.PopScope()
return ret
def Parse_globals(self):
"""Grammar parsed here:
globals: LAMBDA | globals var_def | globals func_def
var_def: TYPE IDENT ';' | TYPE IDENT '=' simple_expr ';'
func_def: optional_type IDENT '(' optional_param_list ')' code_block
optional_type: LAMBDA | TYPE
"""
assert self.scopeindex == 0
while self.tok[0] in ('TYPE','IDENT'):
typ = None
if self.tok[0] == 'TYPE':
typ = self.tok[1]
self.NextToken()
self.expect('IDENT')
name = self.tok[1]
self.NextToken()
if name in self.symtab[0]:
# Duplicate identifier. That's an exception unless function
# override is in effect.
report = True
if self.funcoverride:
# Is it a function definition, and is the entry in the
# symbol table a function definition itself? And is it
# a user-defined function?
if self.tok[0] == '(' \
and self.symtab[0][name]['Kind'] == 'f' \
and 'Loc' in self.symtab[0][name]:
# Override it.
report = False
# Delete the previous definition.
self.tree[self.symtab[0][name]['Loc']] = \
nr(nt='LAMBDA', t=None)
del self.symtab[0][name]
if report:
raise EParseAlreadyDefined(self)
if self.tok[0] in ('=', ';'):
# This is a variable definition
if typ is None: # Typeless variables are not allowed
raise EParseSyntax(self)
if self.tok[0] == '=':
self.NextToken()
if self.extendedglobalexpr:
self.disallowglobalvars = True # Disallow forward globals.
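# Strategy: try the restricted simple_expr grammar first; if that
# fails, backtrack to the saved position and re-parse the
# initializer as a full expression.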
# Mark backtracking position
pos = self.pos
errorpos = self.errorpos
tok = self.tok
try:
value = self.Parse_simple_expr()
self.expect(';')
value.Simple = True # Success - mark it as simple
except EParse:
# Backtrack
self.pos = pos
self.errorpos = errorpos
self.tok = tok
# Use advanced expression evaluation.
value = self.Parse_expression()
self.expect(';')
self.disallowglobalvars = False # Allow forward globals again.
else:
# Use LSL's dull global expression.
value = self.Parse_simple_expr()
self.expect(';')
value.Simple = True
else: # must be semicolon
value = None
assert self.scopeindex == 0
decl = nr(nt='DECL', t=typ, name=name, scope=0)
if value is not None:
value = self.autocastcheck(value, typ)
decl.ch = [value]
self.NextToken()
self.AddSymbol('v', 0, name, Loc=len(self.tree), Type=typ)
self.tree.append(decl)
elif self.tok[0] == '(':
# This is a function definition
self.NextToken()
self.PushScope()
params = self.Parse_optional_param_list()
self.expect(')')
self.NextToken()
self.localevents = None
self.locallabels = set()
force_inline = False
if (self.enable_inline and self.tok[0] == 'IDENT'
and self.tok[1] == 'inline'):
self.NextToken()
force_inline = True
body = self.Parse_code_block(typ)
del self.locallabels
if typ and not getattr(body, 'LIR', False): # is LastIsReturn flag set?
self.errorpos = self.closebrace
raise EParseCodePathWithoutRet(self)
paramscope = self.scopeindex
self.AddSymbol('f', 0, name, Loc=len(self.tree), Type=typ,
Inline=force_inline,
ParamTypes=params[0], ParamNames=params[1])
self.tree.append(nr(nt='FNDEF', t=typ, name=name, scope=0,
pscope=paramscope, ptypes=params[0], pnames=params[1],
ch=[body]))
self.PopScope()
assert self.scopeindex == 0
else:
raise EParseSyntax(self)
pass
def Parse_states(self):
"""Grammar parsed here:
states: LAMBDA | states state
state: state_header '{' events '}'
state_header: DEFAULT | STATE IDENT
(but we enforce DEFAULT to be the first token found, meaning there will
be at least one state and the first must be DEFAULT as in the original
grammar)
"""
self.expect('DEFAULT')
while True:
if self.tok[0] != 'DEFAULT' and self.tok[0] != 'STATE':
return
if self.tok[0] == 'DEFAULT':
name = 'default'
else:
self.NextToken()
if self.tok[0] != 'IDENT':
raise EParseSyntax(self)
name = self.tok[1]
if name in self.symtab[self.scopeindex]:
raise EParseAlreadyDefined(self)
assert self.scopeindex == 0
self.AddSymbol('s', 0, name, Loc=len(self.tree))
self.NextToken()
self.expect('{')
self.NextToken()
self.localevents = set()
events = self.Parse_events()
del self.localevents
self.expect('}')
self.tree.append(nr(nt='STDEF', t=None, name=name, scope=0,
ch=events))
self.NextToken()
def Parse_script(self):
"""Parses the whole LSL script
Grammar parsed here:
script: globals states EOF
"""
# We need a table of undefined jump references, to check later,
# as jumps are local, not global, and allow forward definitions.
# This avoids making one more pass, or making the first pass more
# detailed unnecessarily.
self.jump_lookups = []
self.globalmode = True
self.Parse_globals()
self.globalmode = False
self.Parse_states()
self.expect('EOF')
assert len(self.scopestack) == 1 and self.scopestack[0] == 0
# Check the pending jump targets to assign them the scope of the label.
for tgt in self.jump_lookups:
self.scopestack = tgt[1]
self.scopeindex = self.scopestack[-1]
sym = self.FindSymbolPartial(tgt[0], MustBeLabel = True)
if sym is None:
self.errorpos = tgt[2]
raise EParseUndefined(self)
tgt[3].scope = sym['Scope']
sym['ref'] += 1
del self.jump_lookups # Finished with it.
self.scopestack = [0]
def Parse_single_expression(self):
"""Parse the script as an expression, Used by lslcalc.
Grammar parsed here:
script: expression EOF
"""
value = self.Parse_expression()
self.tree.append(nr(nt='EXPR', t=value.t, ch=[value]))
self.expect('EOF')
return
def BuildTempGlobalsTable(self):
"""Build an approximate globals table.
If the script syntax is correct, the globals table will be accurate.
If it is not, it may contain too many or too few symbols (normally the
latter). This globals table is not the normal globals in the symbol
table; it's just needed to resolve forward references. It's temporary.
The grammar is approximately:
script: globals states
globals: [global [global [...]]]
global: [TYPE] IDENT '(' TYPE anytoken [',' TYPE anytoken [...]]
anytoken_except_comma balanced_braces_or_anything_else
| TYPE IDENT [anytoken_except_semicolon [...]] ';'
states: state [state [...]]
state: (DEFAULT | STATE IDENT) balanced_braces_or_anything_else
"""
ret = self.funclibrary.copy() # The library functions go here too.
# If there's a syntax error, that's not our business. We just return
# what we have so far. Doing a proper parse will determine the exact
# location and cause.
# Here we don't even care if it's duplicate - that will be caught
# when adding to the real symbol table.
# Scan globals
try:
while self.tok[0] not in ('DEFAULT', 'EOF'):
typ = None
if self.tok[0] == 'TYPE':
typ = self.tok[1]
self.NextToken()
if self.tok[0] != 'IDENT':
return ret
name = self.tok[1]
self.NextToken()
if self.tok[0] == '(':
# Function call
self.NextToken()
params = []
| if self.tok[0] != ')': | 10,276 | lcc_e | python | null | cd9c5ebc544ca1c92142cfa8e3c50e240e3bf5573ebbe13d |
|
#
# File: courseware/capa/responsetypes.py
#
"""
Problem response evaluation. Handles checking of student responses,
of a variety of types.
Used by capa_problem.py
"""
# TODO: Refactor this code and fix this issue.
# pylint: disable=attribute-defined-outside-init
# standard library imports
import abc
import cgi
import inspect
import json
import logging
import html5lib
import numbers
import numpy
import os
from pyparsing import ParseException
import sys
import random
import re
import requests
import subprocess
import textwrap
import traceback
import xml.sax.saxutils as saxutils
from cmath import isnan
from sys import float_info
from collections import namedtuple
from shapely.geometry import Point, MultiPoint
import dogstats_wrapper as dog_stats_api
# specific library imports
from calc import evaluator, UndefinedVariable
from . import correctmap
from .registry import TagRegistry
from datetime import datetime
from pytz import UTC
from .util import (
compare_with_tolerance, contextualize_text, convert_files_to_filenames,
is_list_of_files, find_with_default, default_tolerance, get_inner_html_from_xpath
)
from lxml import etree
from lxml.html.soupparser import fromstring as fromstring_bs # uses Beautiful Soup!!! FIXME?
import capa.xqueue_interface as xqueue_interface
import capa.safe_exec as safe_exec
from openedx.core.djangolib.markup import HTML, Text
log = logging.getLogger(__name__)
registry = TagRegistry()
CorrectMap = correctmap.CorrectMap
CORRECTMAP_PY = None
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
QUESTION_HINT_CORRECT_STYLE = 'feedback-hint-correct'
QUESTION_HINT_INCORRECT_STYLE = 'feedback-hint-incorrect'
QUESTION_HINT_LABEL_STYLE = 'hint-label'
QUESTION_HINT_TEXT_STYLE = 'hint-text'
QUESTION_HINT_MULTILINE = 'feedback-hint-multi'
#-----------------------------------------------------------------------------
# Exceptions
class LoncapaProblemError(Exception):
"""
Error in specification of a problem
"""
pass
class ResponseError(Exception):
"""
Error for failure in processing a response, including
exceptions that occur when executing a custom script.
"""
pass
class StudentInputError(Exception):
"""
Error for an invalid student input.
For example, submitting a string when the problem expects a number
"""
pass
#-----------------------------------------------------------------------------
#
# Main base class for CAPA responsetypes
class LoncapaResponse(object):
"""
Base class for CAPA responsetypes. Each response type (ie a capa question,
which is part of a capa problem) is represented as a subclass,
which should provide the following methods:
- get_score : evaluate the given student answers, and return a CorrectMap
- get_answers : provide a dict of the expected answers for this problem
Each subclass must also define the following attributes:
- tags : xhtml tags identifying this response (used in auto-registering)
In addition, these methods are optional:
- setup_response : find and note the answer input field IDs for the response; called
by __init__
- check_hint_condition : check to see if the student's answers satisfy a particular
condition for a hint to be displayed
- render_html : render this Response as HTML (must return XHTML-compliant string)
- __unicode__ : unicode representation of this Response
Each response type may also specify the following attributes:
- max_inputfields : (int) maximum number of answer input fields (checked in __init__
if not None)
- allowed_inputfields : list of allowed input fields (each a string) for this Response
- required_attributes : list of required attributes (each a string) on the main
response XML stanza
- hint_tag : xhtml tag identifying hint associated with this response inside
hintgroup
"""
__metaclass__ = abc.ABCMeta # abc = Abstract Base Class
tags = None
hint_tag = None
has_partial_credit = False
credit_type = []
max_inputfields = None
allowed_inputfields = []
required_attributes = []
# Overridable field that specifies whether this capa response type has support
# for rendering on devices of different sizes and shapes.
# By default, we set this to False, allowing subclasses to override as appropriate.
multi_device_support = False
def __init__(self, xml, inputfields, context, system, capa_module, minimal_init):
"""
Init is passed the following arguments:
- xml : ElementTree of this Response
- inputfields : ordered list of ElementTrees for each input entry field in this Response
- context : script processor context
- system : LoncapaSystem instance which provides OS, rendering, and user context
- capa_module : Capa module, to access runtime
"""
self.xml = xml
self.inputfields = inputfields
self.context = context
self.capa_system = system
self.capa_module = capa_module # njp, note None
self.id = xml.get('id')
# The LoncapaProblemError messages here do not need to be translated as they are
# only displayed to the user when settings.DEBUG is True
for abox in inputfields:
if abox.tag not in self.allowed_inputfields:
msg = "%s: cannot have input field %s" % (
unicode(self), abox.tag)
msg += "\nSee XML source line %s" % getattr(
xml, 'sourceline', '<unavailable>')
raise LoncapaProblemError(msg)
if self.max_inputfields and len(inputfields) > self.max_inputfields:
msg = "%s: cannot have more than %s input fields" % (
unicode(self), self.max_inputfields)
msg += "\nSee XML source line %s" % getattr(
xml, 'sourceline', '<unavailable>')
raise LoncapaProblemError(msg)
for prop in self.required_attributes:
if not xml.get(prop):
msg = "Error in problem specification: %s missing required attribute %s" % (
unicode(self), prop)
msg += "\nSee XML source line %s" % getattr(
xml, 'sourceline', '<unavailable>')
raise LoncapaProblemError(msg)
# ordered list of answer_id values for this response
self.answer_ids = [x.get('id') for x in self.inputfields]
if self.max_inputfields == 1:
# for convenience
self.answer_id = self.answer_ids[0]
# map input_id -> maxpoints
self.maxpoints = dict()
for inputfield in self.inputfields:
# By default, each answerfield is worth 1 point
maxpoints = inputfield.get('points', '1')
self.maxpoints.update({inputfield.get('id'): int(maxpoints)})
if not minimal_init:
# dict for default answer map (provided in input elements)
self.default_answer_map = {}
for entry in self.inputfields:
answer = entry.get('correct_answer')
if answer:
self.default_answer_map[entry.get(
'id')] = contextualize_text(answer, self.context)
# Does this problem have partial credit?
# If so, what kind? Get it as a list of strings.
partial_credit = xml.xpath('.')[0].get('partial_credit', default=False)
if str(partial_credit).lower().strip() == 'false':
self.has_partial_credit = False
self.credit_type = []
else:
self.has_partial_credit = True
self.credit_type = partial_credit.split(',')
self.credit_type = [word.strip().lower() for word in self.credit_type]
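# e.g. partial_credit="points, edc" yields credit_type == ['points', 'edc']
# (these values only illustrate the parsing, not the supported modes).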
if hasattr(self, 'setup_response'):
self.setup_response()
def get_max_score(self):
"""
Return the total maximum points of all answer fields under this Response
"""
return sum(self.maxpoints.values())
def render_html(self, renderer, response_msg=''):
"""
Return XHTML Element tree representation of this Response.
Arguments:
- renderer : procedure which produces HTML given an ElementTree
- response_msg: a message displayed at the end of the Response
"""
_ = self.capa_system.i18n.ugettext
# response_id = problem_id + response index
response_id = self.xml.attrib['id']
response_index = response_id.split('_')[-1]
# Translators: index here could be 1,2,3 and so on
response_label = _(u'Question {index}').format(index=response_index)
# wrap the content inside a section
tree = etree.Element('div')
tree.set('class', 'wrapper-problem-response')
tree.set('tabindex', '-1')
tree.set('aria-label', response_label)
tree.set('role', 'group')
if self.xml.get('multiple_inputtypes'):
# add <div> to wrap all inputtypes
content = etree.SubElement(tree, 'div')
content.set('class', 'multi-inputs-group')
content.set('role', 'group')
if self.xml.get('multiinput-group-label-id'):
content.set('aria-labelledby', self.xml.get('multiinput-group-label-id'))
if self.xml.get('multiinput-group_description_ids'):
content.set('aria-describedby', self.xml.get('multiinput-group_description_ids'))
else:
content = tree
# problem author can make this span display:inline
if self.xml.get('inline', ''):
tree.set('class', 'inline')
for item in self.xml:
# call provided procedure to do the rendering
item_xhtml = renderer(item)
if item_xhtml is not None:
content.append(item_xhtml)
tree.tail = self.xml.tail
# Add a <div> for the message at the end of the response
if response_msg:
content.append(self._render_response_msg_html(response_msg))
return tree
def evaluate_answers(self, student_answers, old_cmap):
"""
Called by capa_problem.LoncapaProblem to evaluate student answers, and to
generate hints (if any).
Returns the new CorrectMap, with (correctness,msg,hint,hintmode) for each answer_id.
"""
new_cmap = self.get_score(student_answers)
self.get_hints(convert_files_to_filenames(
student_answers), new_cmap, old_cmap)
return new_cmap
def make_hint_div(self, hint_node, correct, student_answer, question_tag,
label=None, hint_log=None, multiline_mode=False, log_extra=None):
"""
Returns the extended hint div based on the student_answer
or the empty string if, after processing all the arguments, there is no hint.
As a side effect, logs a tracking log event detailing the hint.
Keyword args:
* hint_node: xml node such as <optionhint>, holding extended hint text. May be passed in as None.
* correct: bool indication if the student answer is correct
* student_answer: list length 1 or more of string answers
(only checkboxes make multiple answers)
* question_tag: string name of enclosing question, e.g. 'choiceresponse'
* label: (optional) if None (the default), extracts the label from the node,
otherwise using this value. The value '' inhibits labeling of the hint.
* hint_log: (optional) hints to be used, passed in as list-of-dict format (below)
* multiline_mode: (optional) bool, default False, hints should be shown one-per line
* log_extra: (optional) dict items to be injected in the tracking log
There are many parameters to this method because a variety of extended hint contexts
all bottleneck through here. In addition, the caller must provide detailed background
information about the hint-trigger to go in the tracking log.
hint_log format: list of dicts with each hint as a 'text' key. Each dict has extra
information for logging, essentially recording the logic which triggered the feedback.
Case 1: records which choices triggered
e.g. [{'text': 'feedback 1', 'trigger': [{'choice': 'choice_0', 'selected': True}]},...
Case 2: a compound hint, the trigger list has 1 or more choices
e.g. [{'text': 'a hint', 'trigger':[{'choice': 'choice_0', 'selected': True},
{'choice': 'choice_1', 'selected':True}]}]
"""
_ = self.capa_system.i18n.ugettext
# 1. Establish the hint_texts
# This can lead to early-exit if the hint is blank.
if not hint_log:
# .text can be None when node has immediate children nodes
if hint_node is None or (hint_node.text is None and len(hint_node.getchildren()) == 0):
return ''
hint_text = get_inner_html_from_xpath(hint_node)
if not hint_text:
return ''
hint_log = [{'text': hint_text}]
# invariant: hint_log is a non-empty list of dicts, each holding its hint text under the 'text' key
# 2. Establish the label:
# Passed in, or from the node, or the default
if not label and hint_node is not None:
label = hint_node.get('label', None)
# Tricky: label None means output defaults, while '' means output empty label
if label is None:
if correct:
label = _(u'Correct:')
else:
label = _(u'Incorrect:')
# self.runtime.track_function('get_demand_hint', event_info)
# This this "feedback hint" event
event_info = dict()
event_info['module_id'] = self.capa_module.location.to_deprecated_string()
event_info['problem_part_id'] = self.id
event_info['trigger_type'] = 'single' # maybe be overwritten by log_extra
event_info['hint_label'] = label
event_info['hints'] = hint_log
event_info['correctness'] = correct
event_info['student_answer'] = student_answer
event_info['question_type'] = question_tag
if log_extra:
event_info.update(log_extra)
self.capa_module.runtime.track_function('edx.problem.hint.feedback_displayed', event_info)
# Form the div-wrapped hint texts
hints_wrap = HTML('').join(
[HTML('<div class="{question_hint_text_style}">{hint_content}</div>').format(
question_hint_text_style=QUESTION_HINT_TEXT_STYLE,
hint_content=HTML(dct.get('text'))
) for dct in hint_log]
)
if multiline_mode:
hints_wrap = HTML('<div class="{question_hint_multiline}">{hints_wrap}</div>').format(
question_hint_multiline=QUESTION_HINT_MULTILINE,
hints_wrap=hints_wrap
)
label_wrap = ''
if label:
label_wrap = HTML('<span class="{question_hint_label_style}">{label} </span>').format(
question_hint_label_style=QUESTION_HINT_LABEL_STYLE,
label=Text(label)
)
# Establish the outer style
if correct:
style = QUESTION_HINT_CORRECT_STYLE
else:
style = QUESTION_HINT_INCORRECT_STYLE
# Ready to go
return HTML('<div class="{st}"><div class="explanation-title">{text}</div>{lwrp}{hintswrap}</div>').format(
st=style,
text=Text(_("Answer")),
lwrp=label_wrap,
hintswrap=hints_wrap
)
def get_extended_hints(self, student_answers, new_cmap):
"""
Pull "extended hint" information out the xml based on the student answers,
installing it in the new_map for display.
Implemented by subclasses that have extended hints.
"""
pass
def get_hints(self, student_answers, new_cmap, old_cmap):
"""
Generate adaptive hints for this problem based on student answers, the old CorrectMap,
and the new CorrectMap produced by get_score.
Does not return anything.
Modifies new_cmap, by adding hints to answer_id entries as appropriate.
"""
hintfn = None
hint_function_provided = False
hintgroup = self.xml.find('hintgroup')
if hintgroup is not None:
hintfn = hintgroup.get('hintfn')
if hintfn is not None:
hint_function_provided = True
if hint_function_provided:
# if a hint function has been supplied, it will take precedence
# Hint is determined by a function defined in the <script> context; evaluate
# that function to obtain list of hint, hintmode for each answer_id.
# The function should take arguments (answer_ids, student_answers, new_cmap, old_cmap)
# and it should modify new_cmap as appropriate.
# We may extend this in the future to add another argument which provides a
# callback procedure to a social hint generation system.
global CORRECTMAP_PY
if CORRECTMAP_PY is None:
# We need the CorrectMap code for hint functions. No, this is not great.
CORRECTMAP_PY = inspect.getsource(correctmap)
code = (
CORRECTMAP_PY + "\n" +
self.context['script_code'] + "\n" +
textwrap.dedent("""
new_cmap = CorrectMap()
new_cmap.set_dict(new_cmap_dict)
old_cmap = CorrectMap()
old_cmap.set_dict(old_cmap_dict)
{hintfn}(answer_ids, student_answers, new_cmap, old_cmap)
new_cmap_dict.update(new_cmap.get_dict())
old_cmap_dict.update(old_cmap.get_dict())
""").format(hintfn=hintfn)
)
globals_dict = {
'answer_ids': self.answer_ids,
'student_answers': student_answers,
'new_cmap_dict': new_cmap.get_dict(),
'old_cmap_dict': old_cmap.get_dict(),
}
try:
safe_exec.safe_exec(
code,
globals_dict,
python_path=self.context['python_path'],
extra_files=self.context['extra_files'],
slug=self.id,
random_seed=self.context['seed'],
unsafely=self.capa_system.can_execute_unsafe_code(),
)
except Exception as err:
_ = self.capa_system.i18n.ugettext
msg = _('Error {err} in evaluating hint function {hintfn}.').format(err=err, hintfn=hintfn)
sourcenum = getattr(self.xml, 'sourceline', _('(Source code line unavailable)'))
msg += "\n" + _("See XML source line {sourcenum}.").format(sourcenum=sourcenum)
raise ResponseError(msg)
new_cmap.set_dict(globals_dict['new_cmap_dict'])
return
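# Illustrative shape of a hint function consumed by the safe_exec block above.
# It is defined in the problem's <script> and named by hintfn= on <hintgroup>;
# the function name and hint text here are made up.
#
#   def my_hintfn(answer_ids, student_answers, new_cmap, old_cmap):
#       aid = answer_ids[0]
#       if '0' in student_answers.get(aid, ''):
#           new_cmap.set_hint_and_mode(aid, 'Watch out for zero!', 'always')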
# no hint function provided
# hint specified by conditions and text dependent on conditions (a-la Loncapa design)
# see http://help.loncapa.org/cgi-bin/fom?file=291
#
# Example:
#
# <formularesponse samples="x@-5:5#11" id="11" answer="$answer">
# <textline size="25" />
# <hintgroup>
# <formulahint samples="x@-5:5#11" answer="$wrongans" name="inversegrad"></formulahint>
# <hintpart on="inversegrad">
# <text>You have inverted the slope in the question. The slope is
# (y2-y1)/(x2 - x1) you have the slope as (x2-x1)/(y2-y1).</text>
# </hintpart>
# </hintgroup>
# </formularesponse>
if (self.hint_tag is not None
and hintgroup is not None
and hintgroup.find(self.hint_tag) is not None
and hasattr(self, 'check_hint_condition')):
rephints = hintgroup.findall(self.hint_tag)
hints_to_show = self.check_hint_condition(
rephints, student_answers)
# can be 'on_request' or 'always' (default)
hintmode = hintgroup.get('mode', 'always')
for hintpart in hintgroup.findall('hintpart'):
if hintpart.get('on') in hints_to_show:
hint_text = hintpart.find('text').text
# make the hint appear after the last answer box in this
# response
aid = self.answer_ids[-1]
new_cmap.set_hint_and_mode(aid, hint_text, hintmode)
log.debug('after hint: new_cmap = %s', new_cmap)
else:
# If no other hint form matches, try extended hints.
self.get_extended_hints(student_answers, new_cmap)
@abc.abstractmethod
def get_score(self, student_answers):
"""
Return a CorrectMap for the answers expected vs given. This includes
(correctness, npoints, msg) for each answer_id.
Arguments:
- student_answers : dict of (answer_id, answer) where answer = student input (string)
"""
pass
@abc.abstractmethod
def get_answers(self):
"""
Return a dict of (answer_id, answer_text) for each answer for this question.
"""
pass
def check_hint_condition(self, hxml_set, student_answers):
"""
Return a list of hints to show.
- hxml_set : list of Element trees, each specifying a condition to be
satisfied for a named hint condition
- student_answers : dict of student answers
Returns a list of names of hint conditions which were satisfied. Those are used
to determine which hints are displayed.
"""
pass
def setup_response(self):
pass
def __unicode__(self):
return u'LoncapaProblem Response %s' % self.xml.tag
def _render_response_msg_html(self, response_msg):
""" Render a <div> for a message that applies to the entire response.
*response_msg* is a string, which may contain XHTML markup
Returns an etree element representing the response message <div> """
# First try wrapping the text in a <div> and parsing
# it as an XHTML tree
try:
response_msg_div = etree.XML('<div>%s</div>' % str(response_msg))
# If we can't do that, create the <div> and set the message
# as the text of the <div>
except Exception: # pylint: disable=broad-except
response_msg_div = etree.Element('div')
response_msg_div.text = str(response_msg)
# Set the css class of the message <div>
response_msg_div.set("class", "response_message")
return response_msg_div
# These accessor functions allow polymorphic checking of response
# objects without having to call hasattr() directly.
def has_mask(self):
"""True if the response has masking."""
return hasattr(self, '_has_mask')
def has_shuffle(self):
"""True if the response has a shuffle transformation."""
return hasattr(self, '_has_shuffle')
def has_answerpool(self):
"""True if the response has an answer-pool transformation."""
return hasattr(self, '_has_answerpool')
#-----------------------------------------------------------------------------
@registry.register
class ChoiceResponse(LoncapaResponse):
"""
This response type is used when the student chooses from a discrete set of
choices. Currently, to be marked correct, all "correct" choices must be
supplied by the student, and no extraneous choices may be included.
This response type allows for two inputtypes: radiogroups and checkbox
groups. radiogroups are used when the student should select a single answer,
and checkbox groups are used when the student may supply 0+ answers.
Note: it is suggested to include a "None of the above" choice when no
answer is correct for a checkboxgroup inputtype; this ensures that a student
must actively mark something to get credit.
If two choices are marked as correct with a radiogroup, the student will
have no way to get the answer right.
TODO: Allow for marking choices as 'optional' and 'required', which would
not penalize a student for including optional answers and would also allow
for questions in which the student can supply one out of a set of correct
answers.This would also allow for survey-style questions in which all
answers are correct.
Example:
<choiceresponse>
<radiogroup>
<choice correct="false">
<text>This is a wrong answer.</text>
</choice>
<choice correct="true">
<text>This is the right answer.</text>
</choice>
<choice correct="false">
<text>This is another wrong answer.</text>
</choice>
</radiogroup>
</choiceresponse>
In the above example, radiogroup can be replaced with checkboxgroup to allow
the student to select more than one choice.
TODO: In order for the inputtypes to render properly, this response type
must run setup_response prior to the input type rendering. Specifically, the
choices must be given names. This behavior seems like a leaky abstraction,
and it'd be nice to change this at some point.
"""
human_name = _('Checkboxes')
tags = ['choiceresponse']
max_inputfields = 1
allowed_inputfields = ['checkboxgroup', 'radiogroup']
correct_choices = None
multi_device_support = True
def setup_response(self):
self.assign_choice_names()
self.correct_choices = set()
self.incorrect_choices = set()
for choice in self.get_choices():
# contextualize the name and correct attributes
name = contextualize_text(choice.get('name'), self.context)
correct = contextualize_text(choice.get('correct'), self.context).upper()
# divide choices into correct and incorrect
if correct == 'TRUE':
self.correct_choices.add(name)
elif correct == 'FALSE':
self.incorrect_choices.add(name)
def get_choices(self):
"""Returns this response's XML choice elements."""
return self.xml.xpath('//*[@id=$id]//choice', id=self.xml.get('id'))
def assign_choice_names(self):
"""
Initialize name attributes in <choice> tags for this response.
"""
for index, choice in enumerate(self.get_choices()):
choice.set("name", "choice_" + str(index))
# If a choice does not have an id, assign 'A', 'B', ... as used by CompoundHint
if not choice.get('id'):
choice.set("id", chr(ord("A") + index))
def grade_via_every_decision_counts(self, **kwargs):
"""
Calculates partial credit on the Every Decision Counts scheme.
For each correctly selected or correctly blank choice, score 1 point.
Divide by total number of choices.
Arguments:
all_choices, the full set of checkboxes
student_answer, what the student actually chose
student_non_answers, what the student didn't choose
Returns a CorrectMap.
"""
all_choices = kwargs['all_choices']
student_answer = kwargs['student_answer']
student_non_answers = kwargs['student_non_answers']
edc_max_grade = len(all_choices)
edc_current_grade = 0
good_answers = sum([1 for answer in student_answer if answer in self.correct_choices])
good_non_answers = sum([1 for blank in student_non_answers if blank in self.incorrect_choices])
edc_current_grade = good_answers + good_non_answers
return_grade = round(self.get_max_score() * float(edc_current_grade) / float(edc_max_grade), 2)
if edc_current_grade == edc_max_grade:
return CorrectMap(self.answer_id, correctness='correct')
elif edc_current_grade > 0:
return CorrectMap(self.answer_id, correctness='partially-correct', npoints=return_grade)
else:
return CorrectMap(self.answer_id, correctness='incorrect', npoints=0)
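# A minimal sketch (not part of the shipped code path) of the EDC arithmetic above,
# assuming a max score of 1.0; the helper name `edc_credit` is hypothetical.
#
#   def edc_credit(all_choices, correct, selected, max_score=1.0):
#       # one point per choice in the "right state" (selected if correct, blank if incorrect)
#       right_state = sum(1 for c in all_choices if (c in selected) == (c in correct))
#       return round(max_score * float(right_state) / len(all_choices), 2)
#
#   edc_credit({'choice_0', 'choice_1', 'choice_2', 'choice_3'},
#              correct={'choice_0', 'choice_1'},
#              selected={'choice_0', 'choice_2'})   # -> 0.5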
def grade_via_halves(self, **kwargs):
"""
Calculates partial credit on the Halves scheme.
If no errors, full credit.
If one error, half credit as long as there are 3+ choices
If two errors, 1/4 credit as long as there are 5+ choices
(If not enough choices, no credit.)
Arguments:
all_choices, the full set of checkboxes
student_answer, what the student actually chose
student_non_answers, what the student didn't choose
Returns a CorrectMap
"""
all_choices = kwargs['all_choices']
student_answer = kwargs['student_answer']
student_non_answers = kwargs['student_non_answers']
halves_error_count = 0
incorrect_answers = sum([1 for answer in student_answer if answer in self.incorrect_choices])
missed_answers = sum([1 for blank in student_non_answers if blank in self.correct_choices])
halves_error_count = incorrect_answers + missed_answers
if halves_error_count == 0:
return_grade = self.get_max_score()
return CorrectMap(self.answer_id, correctness='correct', npoints=return_grade)
elif halves_error_count == 1 and len(all_choices) > 2:
return_grade = round(self.get_max_score() / 2.0, 2)
return CorrectMap(self.answer_id, correctness='partially-correct', npoints=return_grade)
elif halves_error_count == 2 and len(all_choices) > 4:
return_grade = round(self.get_max_score() / 4.0, 2)
return CorrectMap(self.answer_id, correctness='partially-correct', npoints=return_grade)
else:
return CorrectMap(self.answer_id, 'incorrect')
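# Worked example of the Halves scheme above (illustrative; max score 1.0, 4 choices):
#   0 errors -> 1.0 (correct)
#   1 error  -> 0.5 (partially-correct, since there are more than 2 choices)
#   2 errors -> no credit (the 1/4-credit tier needs more than 4 choices)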
def grade_without_partial_credit(self, **kwargs):
"""
Standard grading for checkbox problems.
100% credit if all choices are correct; 0% otherwise
Arguments: student_answer, which is the items the student actually chose
"""
student_answer = kwargs['student_answer']
required_selected = len(self.correct_choices - student_answer) == 0
no_extra_selected = len(student_answer - self.correct_choices) == 0
correct = required_selected & no_extra_selected
if correct:
return CorrectMap(self.answer_id, 'correct')
else:
return CorrectMap(self.answer_id, 'incorrect')
def get_score(self, student_answers):
# Setting up answer sets:
# all_choices: the full set of checkboxes
# student_answer: what the student actually chose (note no "s")
# student_non_answers: what they didn't choose
# self.correct_choices: boxes that should be checked
# self.incorrect_choices: boxes that should NOT be checked
all_choices = self.correct_choices.union(self.incorrect_choices)
student_answer = student_answers.get(self.answer_id, [])
if not isinstance(student_answer, list):
student_answer = [student_answer]
# When a student leaves all the boxes unmarked, edX throws an error.
# This check catches blank answers so that we can mark them incorrect instead.
# This is not ideal. "None apply" should be a valid choice.
# Sadly, this is not the place where we can fix that problem.
empty_answer = student_answer == []
if empty_answer:
return CorrectMap(self.answer_id, 'incorrect')
student_answer = set(student_answer)
student_non_answers = all_choices - student_answer
# No partial credit? Get grade right now.
if not self.has_partial_credit:
return self.grade_without_partial_credit(student_answer=student_answer)
# The code below checks whether we're using an alternate grading scheme.
# Set partial_credit="false" (or remove it) to require an exact answer for any credit.
# Set partial_credit="EDC" to count each choice for equal points (Every Decision Counts).
# Set partial_credit="halves" to take half credit off for each error.
# Translators: 'partial_credit' and the items in the 'graders' object
# are attribute names or values and should not be translated.
graders = {
'edc': self.grade_via_every_decision_counts,
'halves': self.grade_via_halves,
'false': self.grade_without_partial_credit
}
# Only one type of credit at a time.
if len(self.credit_type) > 1:
raise LoncapaProblemError('Only one type of partial credit is allowed for Checkbox problems.')
# Make sure we're using an approved style.
if self.credit_type[0] not in graders:
raise LoncapaProblemError('partial_credit attribute should be one of: ' + ','.join(graders))
# Run the appropriate grader.
return graders[self.credit_type[0]](
all_choices=all_choices,
student_answer=student_answer,
student_non_answers=student_non_answers
)
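# Illustrative markup (an assumption about authoring syntax, not taken from this file)
# showing how the partial_credit attribute selects one of the graders above:
#
#   <choiceresponse partial_credit="EDC">
#       <checkboxgroup>
#           <choice correct="true">A</choice>
#           <choice correct="false">B</choice>
#       </checkboxgroup>
#   </choiceresponse>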
def get_answers(self):
return {self.answer_id: list(self.correct_choices)}
def get_extended_hints(self, student_answers, new_cmap):
"""
Extract compound and extended hint information from the xml based on the student_answers.
The hint information goes into the msg= in new_cmap for display.
Each choice in the checkboxgroup can have 2 extended hints, matching the
case that the student has or has not selected that choice:
<checkboxgroup label="Select the best snack">
<choice correct="true">Donut
<choicehint selected="tRuE">A Hint!</choicehint>
<choicehint selected="false">Another hint!</choicehint>
</choice>
"""
# Tricky: student_answers may be *empty* here. That is the representation that
# no checkboxes were selected. For typical responsetypes, you look at
# student_answers[self.answer_id], but that does not work here.
# Compound hints are specific to checkboxgroup; try them first,
# before the regular extended hints.
if self.get_compound_hints(new_cmap, student_answers):
return
# Look at all the choices - each can generate some hint text
choices = self.xml.xpath('//checkboxgroup[@id=$id]/choice', id=self.answer_id)
hint_log = []
label = None
label_count = 0
choice_all = []
# Tricky: in the case that the student selects nothing, there is simply
# no entry in student_answers, rather than an entry with the empty list value.
# That explains the following line.
student_choice_list = student_answers.get(self.answer_id, [])
# We build up several hints in hint_divs, then wrap it once at the end.
for choice in choices:
name = choice.get('name') # generated name, e.g. choice_2
choice_all.append(name)
selected = name in student_choice_list # looking for 'true' vs. 'false'
if selected:
selector = 'true'
else:
selector = 'false'
# We find the matching <choicehint> in python vs xpath so we can be case-insensitive
hint_nodes = choice.findall('./choicehint')
for hint_node in hint_nodes:
if hint_node.get('selected', '').lower() == selector:
text = get_inner_html_from_xpath(hint_node)
if hint_node.get('label') is not None: # tricky: label '' vs None is significant
label = hint_node.get('label')
label_count += 1
if text:
hint_log.append({'text': text, 'trigger': [{'choice': name, 'selected': selected}]})
if hint_log:
# Complication: if there is only a single label specified, we use it.
# However if there are multiple, we use none.
if label_count > 1:
label = None
new_cmap[self.answer_id]['msg'] += self.make_hint_div(
None,
new_cmap[self.answer_id]['correctness'] == 'correct',
student_choice_list,
self.tags[0],
label,
hint_log,
multiline_mode=True, # the one case where we do this
log_extra={'choice_all': choice_all} # checkbox specific logging
)
def get_compound_hints(self, new_cmap, student_answers):
"""
Compound hints are a type of extended hint specific to checkboxgroup with the
<compoundhint value="A C"> meaning choices A and C were selected.
Checks for a matching compound hint, installing it in new_cmap.
Returns True if compound condition hints were matched.
"""
compound_hint_matched = False
if self.answer_id in student_answers:
# First create a set of the student's selected ids
student_set = set()
names = []
for student_answer in student_answers[self.answer_id]:
choice_list = self.xml.xpath('//checkboxgroup[@id=$id]/choice[@name=$name]',
id=self.answer_id, name=student_answer)
if choice_list:
choice = choice_list[0]
student_set.add(choice.get('id').upper())
names.append(student_answer)
for compound_hint in self.xml.xpath('//checkboxgroup[@id=$id]/compoundhint', id=self.answer_id):
# Selector words are space separated and not case-sensitive
selectors = compound_hint.get('value').upper().split()
selector_set = set(selectors)
if selector_set == student_set:
# This is the atypical case where the hint text is in an inner div with its own style.
hint_text = compound_hint.text.strip()
# Compute the choice names just for logging
choices = self.xml.xpath('//checkboxgroup[@id=$id]/choice', id=self.answer_id)
choice_all = [choice.get('name') for choice in choices]
hint_log = [{'text': hint_text, 'trigger': [{'choice': name, 'selected': True} for name in names]}]
new_cmap[self.answer_id]['msg'] += self.make_hint_div(
compound_hint,
new_cmap[self.answer_id]['correctness'] == 'correct',
student_answers[self.answer_id],
self.tags[0],
hint_log=hint_log,
log_extra={'trigger_type': 'compound', 'choice_all': choice_all}
)
compound_hint_matched = True
break
return compound_hint_matched
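# Illustrative markup for the compound-hint matching above (the hint fires only when the
# set of selected choice ids, upper-cased, exactly equals the set in value=):
#
#   <checkboxgroup>
#       <choice correct="true" id="A">Apple</choice>
#       <choice correct="true" id="C">Cucumber</choice>
#       <compoundhint value="A C">Both choices together make a nice salad.</compoundhint>
#   </checkboxgroup>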
#-----------------------------------------------------------------------------
@registry.register
class MultipleChoiceResponse(LoncapaResponse):
"""
Multiple Choice Response
The shuffle and answer-pool features on this class enable permuting and
subsetting the choices shown to the student.
Both features enable name "masking":
With masking, the regular names of multiplechoice choices
choice_0 choice_1 ... are not used. Instead we use random masked names
mask_2 mask_0 ... so that a view-source of the names reveals nothing about
the original order. We introduce the masked names right at init time, so the
whole software stack works with just the one system of naming.
The .has_mask() test on a response checks for masking, implemented by a
._has_mask attribute on the response object.
The logging functionality in capa_base calls the unmask functions here
to translate back to choice_0 name style for recording in the logs, so
the logging is in terms of the regular names.
"""
# TODO: randomize
human_name = _('Multiple Choice')
tags = ['multiplechoiceresponse']
max_inputfields = 1
allowed_inputfields = ['choicegroup']
correct_choices = None
multi_device_support = True
def setup_response(self):
"""
Collects information from the XML for later use.
correct_choices is a list of the correct choices.
partial_choices is a list of the partially-correct choices.
partial_values is a list of the scores that go with those
choices, defaulting to 0.5 if no value is specified.
"""
# call secondary setup for MultipleChoice questions, to set name
# attributes
self.mc_setup_response()
# define correct choices (after calling secondary setup)
xml = self.xml
cxml = xml.xpath('//*[@id=$id]//choice', id=xml.get('id'))
# contextualize correct attribute and then select ones for which
# correct = "true"
self.correct_choices = [
contextualize_text(choice.get('name'), self.context)
for choice in cxml
if contextualize_text(choice.get('correct'), self.context).upper() == "TRUE"
]
if self.has_partial_credit:
self.partial_choices = [
contextualize_text(choice.get('name'), self.context)
for choice in cxml
if contextualize_text(choice.get('correct'), self.context).lower() == 'partial'
]
self.partial_values = [
float(choice.get('point_value', default='0.5')) # Default partial credit: 50%
for choice in cxml
if contextualize_text(choice.get('correct'), self.context).lower() == 'partial'
]
def get_extended_hints(self, student_answer_dict, new_cmap):
"""
Extract any hints in a <choicegroup> matching the student's answers
<choicegroup label="What is your favorite color?" type="MultipleChoice">
<choice correct="false">Red
<choicehint>No, Blue!</choicehint>
</choice>
...
Any hint text is installed in the new_cmap.
"""
if self.answer_id in student_answer_dict:
student_answer = student_answer_dict[self.answer_id]
# Warning: mostly student_answer is a string, but sometimes it is a list of strings.
if isinstance(student_answer, list):
student_answer = student_answer[0]
# Find the named choice used by the student. Silently ignore a non-matching
# choice name.
choice = self.xml.find('./choicegroup[@id="{0}"]/choice[@name="{1}"]'.format(self.answer_id,
student_answer))
if choice is not None:
hint_node = choice.find('./choicehint')
new_cmap[self.answer_id]['msg'] += self.make_hint_div(
hint_node,
choice.get('correct').upper() == 'TRUE',
[student_answer],
self.tags[0]
)
def mc_setup_response(self):
"""
Initialize name attributes in <choice> stanzas in the <choicegroup> in this response.
Masks the choice names if applicable.
"""
i = 0
for response in self.xml.xpath("choicegroup"):
# Is Masking enabled? -- check for shuffle or answer-pool features
# Masking (self._has_mask) is off, to be re-enabled with a future PR.
rtype = response.get('type')
if rtype not in ["MultipleChoice"]:
# force choicegroup to be MultipleChoice if not valid
response.set("type", "MultipleChoice")
for choice in list(response):
# The regular, non-masked name:
if choice.get("name") is not None:
name = "choice_" + choice.get("name")
else:
name = "choice_" + str(i)
i += 1
# If using the masked name, e.g. mask_0, save the regular name
# to support unmasking later (for the logs).
# Masking is currently disabled so this code is commented, as
# the variable `mask_ids` is not defined. (the feature appears to not be fully implemented)
# The original work for masking was done by Nick Parlante as part of the OLI Hinting feature.
# if self.has_mask():
# mask_name = "mask_" + str(mask_ids.pop())
# self._mask_dict[mask_name] = name
# choice.set("name", mask_name)
# else:
choice.set("name", name)
def late_transforms(self, problem):
"""
Rearrangements run late in the __init__ process.
Cannot do these at response init time, as not enough
other stuff exists at that time.
"""
self.do_shuffle(self.xml, problem)
self.do_answer_pool(self.xml, problem)
def grade_via_points(self, **kwargs):
"""
Calculates partial credit based on the Points scheme.
Answer choices marked "partial" are given partial credit.
Default is 50%; other amounts may be set in point_value attributes.
Arguments: student_answers
Returns: a CorrectMap
"""
student_answers = kwargs['student_answers']
if (self.answer_id in student_answers
and student_answers[self.answer_id] in self.correct_choices):
return CorrectMap(self.answer_id, correctness='correct')
elif (
self.answer_id in student_answers
and student_answers[self.answer_id] in self.partial_choices
):
choice_index = self.partial_choices.index(student_answers[self.answer_id])
credit_amount = self.partial_values[choice_index]
return CorrectMap(self.answer_id, correctness='partially-correct', npoints=credit_amount)
else:
return CorrectMap(self.answer_id, 'incorrect')
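# Illustrative markup (an assumption about authoring syntax) for the Points scheme above;
# point_value overrides the 0.5 default for a "partial" choice:
#
#   <multiplechoiceresponse partial_credit="points">
#       <choicegroup type="MultipleChoice">
#           <choice correct="true">Right</choice>
#           <choice correct="partial" point_value="0.25">Close</choice>
#           <choice correct="false">Wrong</choice>
#       </choicegroup>
#   </multiplechoiceresponse>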
def grade_without_partial_credit(self, **kwargs):
"""
Standard grading for multiple-choice problems.
100% credit if choices are correct; 0% otherwise
Arguments: student_answers
Returns: a CorrectMap
"""
student_answers = kwargs['student_answers']
if (self.answer_id in student_answers
and student_answers[self.answer_id] in self.correct_choices):
return CorrectMap(self.answer_id, correctness='correct')
else:
return CorrectMap(self.answer_id, 'incorrect')
def get_score(self, student_answers):
"""
grade student response.
"""
# No partial credit? Grade it right away.
if not self.has_partial_credit:
return self.grade_without_partial_credit(student_answers=student_answers)
# This below checks to see whether we're using an alternate grading scheme.
# Set partial_credit="false" (or remove it) to require an exact answer for any credit.
# Set partial_credit="points" to set specific point values for specific choices.
# Translators: 'partial_credit' and the items in the 'graders' object
# are attribute names or values and should not be translated.
graders = {
'points': self.grade_via_points,
'false': self.grade_without_partial_credit
}
# Only one type of credit at a time.
if len(self.credit_type) > 1:
raise LoncapaProblemError('Only one type of partial credit is allowed for Multiple Choice problems.')
# Make sure we're using an approved style.
if self.credit_type[0] not in graders:
raise LoncapaProblemError('partial_credit attribute should be one of: ' + ','.join(graders))
# Run the appropriate grader.
return graders[self.credit_type[0]](
student_answers=student_answers
)
def get_answers(self):
return {self.answer_id: self.correct_choices}
def unmask_name(self, name):
"""
Given a masked name, e.g. mask_2, returns the regular name, e.g. choice_0.
Fails with LoncapaProblemError if called on a response that is not masking.
"""
# if not self.has_mask():
# _ = self.capa_system.i18n.ugettext
# # Translators: 'unmask_name' is a method name and should not be translated.
# msg = "unmask_name called on response that is not masked"
# raise LoncapaProblemError(msg)
# return self._mask_dict[name] # TODO: this is not defined
raise NotImplementedError()
def unmask_order(self):
"""
Returns a list of the choice names in the order displayed to the user,
using the regular (non-masked) names.
"""
# With masking disabled, this computation remains interesting to see
# the displayed order, even though there is no unmasking.
choices = self.xml.xpath('choicegroup/choice')
return [choice.get("name") for choice in choices]
def do_shuffle(self, tree, problem):
"""
For a choicegroup with shuffle="true", shuffles the choices in-place in the given tree
based on the seed. Otherwise does nothing.
Raises LoncapaProblemError if both shuffle and answer-pool are active:
a problem should use one or the other but not both.
Does nothing if the tree has already been processed.
"""
# The tree is already pared down to this <multiplechoiceresponse>, so this query just
# gets the child choicegroup (i.e. no leading //)
choicegroups = tree.xpath('choicegroup[@shuffle="true"]')
if choicegroups:
choicegroup = choicegroups[0]
if choicegroup.get('answer-pool') is not None:
_ = self.capa_system.i18n.ugettext
# Translators: 'shuffle' and 'answer-pool' are attribute names and should not be translated.
msg = _("Do not use shuffle and answer-pool at the same time")
raise LoncapaProblemError(msg)
# Note in the response that shuffling is done.
# Both to avoid double-processing, and to feed the logs.
if self.has_shuffle():
return
self._has_shuffle = True # pylint: disable=attribute-defined-outside-init
# Move elements from tree to list for shuffling, then put them back.
ordering = list(choicegroup.getchildren())
for choice in ordering:
choicegroup.remove(choice)
ordering = self.shuffle_choices(ordering, self.get_rng(problem))
for choice in ordering:
choicegroup.append(choice)
def shuffle_choices(self, choices, rng):
"""
Returns a list of choice nodes with the shuffling done,
using the provided random number generator.
Choices with 'fixed'='true' are held back from the shuffle.
"""
# Separate out a list of the stuff to be shuffled
# vs. the head/tail of fixed==true choices to be held back from the shuffle.
# Rare corner case: A fixed==true choice "island" in the middle is lumped in
# with the tail group of fixed choices.
# Slightly tricky one-pass implementation using a state machine
head = []
middle = [] # only this one gets shuffled
tail = []
at_head = True
for choice in choices:
if at_head and choice.get('fixed') == 'true':
head.append(choice)
continue
at_head = False
if choice.get('fixed') == 'true':
tail.append(choice)
else:
middle.append(choice)
rng.shuffle(middle)
return head + middle + tail
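# Sketch of the fixed/shuffle split above (illustrative): for choices
#   [A(fixed), B, C, D(fixed), E]
# head = [A], middle = [B, C, E] (the only part shuffled), tail = [D], so A stays first,
# D is lumped into the fixed tail at the end, and only B, C, E change order.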
def get_rng(self, problem):
"""
Get the random number generator to be shared by responses
of the problem, creating it on the problem if needed.
"""
# Multiple questions in a problem share one random number generator (rng) object
# stored on the problem. If each question got its own rng, the structure of multiple
# questions within a problem could appear predictable to the student,
# e.g. (c) keeps being the correct choice. This is due to the seed being
# defined at the problem level, so the multiple rng's would be seeded the same.
# The name _shared_rng begins with an _ to suggest that it is not a facility
# for general use.
# pylint: disable=protected-access
if not hasattr(problem, '_shared_rng'):
problem._shared_rng = random.Random(self.context['seed'])
return problem._shared_rng
def do_answer_pool(self, tree, problem):
"""
Implements the answer-pool subsetting operation in-place on the tree.
Allows a question to define a pool of answers from which the options shown to the
student are randomly selected, so that there is always 1 correct answer and n-1 incorrect
answers, where the author specifies n as the value of the "answer-pool" attribute on <choicegroup>.
The <choicegroup> tag must have an attribute 'answer-pool' giving the desired
pool size. If that attribute is zero or not present, no operation is performed.
Calling this a second time does nothing.
Raises LoncapaProblemError if the answer-pool value is not an integer,
or if the number of correct or incorrect choices available is zero.
"""
choicegroups = tree.xpath("choicegroup[@answer-pool]")
if choicegroups:
choicegroup = choicegroups[0]
num_str = choicegroup.get('answer-pool')
if num_str == '0':
return
try:
num_choices = int(num_str)
except ValueError:
_ = self.capa_system.i18n.ugettext
# Translators: 'answer-pool' is an attribute name and should not be translated.
msg = _("answer-pool value should be an integer")
raise LoncapaProblemError(msg)
# Note in the response that answerpool is done.
# Both to avoid double-processing, and to feed the logs.
if self.has_answerpool():
return
self._has_answerpool = True # pylint: disable=attribute-defined-outside-init
choices_list = list(choicegroup.getchildren())
# Remove all choices in the choices_list (we will add some back in later)
for choice in choices_list:
choicegroup.remove(choice)
rng = self.get_rng(problem) # random number generator to use
# Sample from the answer pool to get the subset choices and solution id
(solution_id, subset_choices) = self.sample_from_answer_pool(choices_list, rng, num_choices)
# Add back in randomly selected choices
for choice in subset_choices:
choicegroup.append(choice)
# Filter out solutions that don't correspond to the correct answer we selected to show
# Note that this means that if the user simply provides a <solution> tag, nothing is filtered
solutionset = choicegroup.xpath('../following-sibling::solutionset')
if len(solutionset) != 0:
solutionset = solutionset[0]
solutions = solutionset.xpath('./solution')
for solution in solutions:
if solution.get('explanation-id') != solution_id:
solutionset.remove(solution)
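# Illustrative markup for the pool subsetting above: with answer-pool="4" the student sees
# the one randomly chosen correct choice plus three random incorrect ones, and only the
# <solution> whose explanation-id matches the shown correct choice is kept.
#
#   <choicegroup type="MultipleChoice" answer-pool="4">
#       <choice correct="true" explanation-id="ans1">...</choice>
#       <choice correct="true" explanation-id="ans2">...</choice>
#       <choice correct="false">...</choice>
#   </choicegroup>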
def sample_from_answer_pool(self, choices, rng, num_pool):
"""
Takes in:
1. list of choices
2. random number generator
3. the requested size "answer-pool" number, in effect a max
Returns a tuple with 2 items:
1. the solution_id corresponding with the chosen correct answer
2. (subset) list of choice nodes with num-1 incorrect and 1 correct
Raises an error if the number of correct or incorrect choices is 0.
"""
correct_choices = []
incorrect_choices = []
for choice in choices:
if choice.get('correct').upper() == 'TRUE':
correct_choices.append(choice)
else:
incorrect_choices.append(choice)
# In my small test, capa seems to treat the absence of any correct=
# attribute as equivalent to ="false", so that's what we do here.
# We raise an error if the problem is highly ill-formed.
# There must be at least one correct and one incorrect choice.
# IDEA: perhaps this sort of semantic-lint constraint should be generalized to all multichoice
# not just down in this corner when answer-pool is used.
# Or perhaps in the overall author workflow, these errors are unhelpful and
# should all be removed.
if len(correct_choices) < 1 or len(incorrect_choices) < 1:
_ = self.capa_system.i18n.ugettext
# Translators: 'Choicegroup' is an input type and should not be translated.
msg = _("Choicegroup must include at least 1 correct and 1 incorrect choice")
raise LoncapaProblemError(msg)
# Limit the number of incorrect choices to what we actually have
num_incorrect = num_pool - 1
num_incorrect = min(num_incorrect, len(incorrect_choices))
# Select the one correct choice
index = rng.randint(0, len(correct_choices) - 1)
correct_choice = correct_choices[index]
solution_id = correct_choice.get('explanation-id')
# Put together the result, pushing most of the work onto rng.shuffle()
subset_choices = [correct_choice]
rng.shuffle(incorrect_choices)
subset_choices += incorrect_choices[:num_incorrect]
rng.shuffle(subset_choices)
return (solution_id, subset_choices)
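# Illustrative call: with 2 correct and 5 incorrect choices and num_pool=4, the returned
# subset holds 1 randomly chosen correct choice (whose explanation-id becomes the returned
# solution_id) plus min(4 - 1, 5) = 3 random incorrect choices, in shuffled order.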
@registry.register
class TrueFalseResponse(MultipleChoiceResponse):
human_name = _('True/False Choice')
tags = ['truefalseresponse']
def mc_setup_response(self):
i = 0
for response in self.xml.xpath("choicegroup"):
response.set("type", "TrueFalse")
for choice in list(response):
if choice.get("name") is None:
choice.set("name", "choice_" + str(i))
i += 1
else:
choice.set("name", "choice_" + choice.get("name"))
def get_score(self, student_answers):
correct = set(self.correct_choices)
answers = student_answers.get(self.answer_id, [])
if not isinstance(answers, list):
answers = [answers]
if correct == set(answers):
return CorrectMap(self.answer_id, 'correct')
return CorrectMap(self.answer_id, 'incorrect')
#-----------------------------------------------------------------------------
@registry.register
class OptionResponse(LoncapaResponse):
"""
TODO: handle randomize
"""
human_name = _('Dropdown')
tags = ['optionresponse']
hint_tag = 'optionhint'
allowed_inputfields = ['optioninput']
answer_fields = None
multi_device_support = True
def setup_response(self):
self.answer_fields = self.inputfields
def get_score(self, student_answers):
cmap = CorrectMap()
amap = self.get_answers()
for aid in amap:
if aid in student_answers and student_answers[aid] == amap[aid]:
cmap.set(aid, 'correct')
else:
cmap.set(aid, 'incorrect')
answer_variable = self.get_student_answer_variable_name(student_answers, aid)
if answer_variable:
cmap.set_property(aid, 'answervariable', answer_variable)
return cmap
def get_answers(self):
amap = dict([(af.get('id'), contextualize_text(af.get(
'correct'), self.context)) for af in self.answer_fields])
return amap
def get_student_answer_variable_name(self, student_answers, aid):
"""
Return the student answer's variable name if it exists in the context, else None.
"""
if aid in student_answers:
for key, val in self.context.iteritems():
# convert val into unicode because the student answer is always a unicode string,
# even if it is a list, dict, etc.
if unicode(val) == student_answers[aid]:
return '$' + key
return None
def get_extended_hints(self, student_answers, new_cmap):
"""
Extract optioninput extended hint, e.g.
<optioninput>
<option correct="True">Donut <optionhint>Of course</optionhint> </option>
"""
answer_id = self.answer_ids[0] # Note *not* self.answer_id
if answer_id in student_answers:
student_answer = student_answers[answer_id]
# If we run into an old-style optioninput, there is no <option> tag, so this safely does nothing
options = self.xml.xpath('//optioninput[@id=$id]/option', id=answer_id)
# Extra pass here to ignore whitespace around the answer in the matching
options = [option for option in options if option.text.strip() == student_answer]
if options:
option = options[0]
hint_node = option.find('./optionhint')
if hint_node is not None:
new_cmap[answer_id]['msg'] += self.make_hint_div(
hint_node,
option.get('correct').upper() == 'TRUE',
[student_answer],
self.tags[0]
)
#-----------------------------------------------------------------------------
@registry.register
class NumericalResponse(LoncapaResponse):
"""
This response type expects a number or formulaic expression that evaluates
to a number (e.g. `4+5/2^2`), and accepts answers within a specified tolerance.
"""
human_name = _('Numerical Input')
tags = ['numericalresponse']
hint_tag = 'numericalhint'
allowed_inputfields = ['textline', 'formulaequationinput']
required_attributes = ['answer']
max_inputfields = 1
multi_device_support = True
def __init__(self, *args, **kwargs):
self.correct_answer = ''
self.additional_answers = []
self.additional_answer_index = -1
self.tolerance = default_tolerance
self.range_tolerance = False
self.answer_range = self.inclusion = None
super(NumericalResponse, self).__init__(*args, **kwargs)
def setup_response(self):
xml = self.xml
context = self.context
answer = xml.get('answer')
self.additional_answers = (
[element.get('answer') for element in xml.findall('additional_answer')]
)
if answer.startswith(('[', '(')) and answer.endswith((']', ')')): # range tolerance case
self.range_tolerance = True
self.inclusion = (
answer.startswith('['), answer.endswith(']')
)
try:
self.answer_range = [contextualize_text(x, context) for x in answer[1:-1].split(',')]
self.correct_answer = answer[0] + self.answer_range[0] + ', ' + self.answer_range[1] + answer[-1]
except Exception:
log.debug("Content error--answer '%s' is not a valid range tolerance answer", answer)
_ = self.capa_system.i18n.ugettext
raise StudentInputError(
_("There was a problem with the staff answer to this problem.")
)
else:
self.correct_answer = contextualize_text(answer, context)
# Find the tolerance
tolerance_xml = xml.xpath(
'//*[@id=$id]//responseparam[@type="tolerance"]/@default',
id=xml.get('id')
)
if tolerance_xml: # If it isn't an empty list...
self.tolerance = contextualize_text(tolerance_xml[0], context)
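# Illustrative markup for the two answer styles handled above:
#
#   <numericalresponse answer="[5,7)">                      <!-- range: 5 <= x < 7 -->
#       <formulaequationinput/>
#   </numericalresponse>
#
#   <numericalresponse answer="6.28">
#       <responseparam type="tolerance" default="5%"/>      <!-- found by the xpath above -->
#       <formulaequationinput/>
#   </numericalresponse>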
def get_staff_ans(self, answer):
"""
Given the staff answer as a string, find its float value.
Use `evaluator` for this, but for backward compatibility, first try the
built-in `complex` (which used to be the standard).
"""
try:
correct_ans = complex(answer)
except ValueError:
# When `correct_answer` is not of the form X+Yj, `complex` raises a
# `ValueError`. In that case, test whether it is instead a math expression.
# `complex` seems to raise only `ValueError`s, so only catch those.
try:
correct_ans = evaluator({}, {}, answer)
except Exception:
log.debug("Content error--answer '%s' is not a valid number", answer)
_ = self.capa_system.i18n.ugettext
raise StudentInputError(
_("There was a problem with the staff answer to this problem.")
)
return correct_ans
def get_score(self, student_answers):
"""
Grade a numeric response.
"""
if self.answer_id not in student_answers:
return CorrectMap(self.answer_id, 'incorrect')
# Make sure we're using an approved partial credit style.
# Currently implemented: 'close' and 'list'
if self.has_partial_credit:
graders = ['list', 'close']
for style in self.credit_type:
if style not in graders:
raise LoncapaProblemError('partial_credit attribute should be one of: ' + ','.join(graders))
student_answer = student_answers[self.answer_id]
_ = self.capa_system.i18n.ugettext
general_exception = StudentInputError(
_(u"Could not interpret '{student_answer}' as a number.").format(student_answer=cgi.escape(student_answer))
)
# Begin `evaluator` block
# Catch a bunch of exceptions and give nicer messages to the student.
try:
student_float = evaluator({}, {}, student_answer)
except UndefinedVariable as undef_var:
raise StudentInputError(
_(u"You may not use variables ({bad_variables}) in numerical problems.").format(
bad_variables=undef_var.message,
)
)
except ValueError as val_err:
if 'factorial' in val_err.message:
# This is thrown when fact() or factorial() is used in an answer
# that evaluates on negative and/or non-integer inputs
# val_err.message will be: `factorial() only accepts integral values` or
# `factorial() not defined for negative values`
raise StudentInputError(
_("factorial function evaluated outside its domain:"
"'{student_answer}'").format(student_answer=cgi.escape(student_answer))
)
else:
raise general_exception
except ParseException:
raise StudentInputError(
_(u"Invalid math syntax: '{student_answer}'").format(student_answer=cgi.escape(student_answer))
)
except Exception:
raise general_exception
# End `evaluator` block -- we figured out the student's answer!
tree = self.xml
# What multiple of the tolerance is worth partial credit?
has_partial_range = tree.xpath('responseparam[@partial_range]')
if has_partial_range:
partial_range = float(has_partial_range[0].get('partial_range', default='2'))
else:
partial_range = 2
# Take in alternative answers that are worth partial credit.
has_partial_answers = tree.xpath('responseparam[@partial_answers]')
if has_partial_answers:
partial_answers = has_partial_answers[0].get('partial_answers').split(',')
for index, word in enumerate(partial_answers):
partial_answers[index] = word.strip()
partial_answers[index] = self.get_staff_ans(partial_answers[index])
else:
partial_answers = False
partial_score = 0.5
is_correct = 'incorrect'
if self.range_tolerance:
if isinstance(student_float, complex):
raise StudentInputError(_(u"You may not use complex numbers in range tolerance problems"))
boundaries = []
for inclusion, answer in zip(self.inclusion, self.answer_range):
boundary = self.get_staff_ans(answer)
if boundary.imag != 0:
raise StudentInputError(
# Translators: This is an error message for a math problem. If the instructor provided a
# boundary (end limit) for a variable that is a complex number (a + bi), this message displays.
_("There was a problem with the staff answer to this problem: complex boundary.")
)
if isnan(boundary):
raise StudentInputError(
# Translators: This is an error message for a math problem. If the instructor did not
# provide a boundary (end limit) for a variable, this message displays.
_("There was a problem with the staff answer to this problem: empty boundary.")
)
boundaries.append(boundary.real)
if compare_with_tolerance(
student_float,
boundary,
tolerance=float_info.epsilon,
relative_tolerance=True
):
is_correct = 'correct' if inclusion else 'incorrect'
break
else:
if boundaries[0] < student_float < boundaries[1]:
is_correct = 'correct'
else:
if self.has_partial_credit is False:
pass
elif 'close' in self.credit_type:
# Partial credit: 50% if the student is outside the specified boundaries,
# but within an extended set of boundaries.
extended_boundaries = []
boundary_range = boundaries[1] - boundaries[0]
extended_boundaries.append(boundaries[0] - partial_range * boundary_range)
extended_boundaries.append(boundaries[1] + partial_range * boundary_range)
if extended_boundaries[0] < student_float < extended_boundaries[1]:
is_correct = 'partially-correct'
else:
correct_float = self.get_staff_ans(self.correct_answer)
# Partial credit is available in three cases:
# If the student answer is within expanded tolerance of the actual answer,
# the student gets 50% credit. (Currently set as the default.)
# Set via partial_credit="close" in the numericalresponse tag.
#
# If the student answer is within regular tolerance of an alternative answer,
# the student gets 50% credit. (Same default.)
# Set via partial_credit="list"
#
# If the student answer is within expanded tolerance of an alternative answer,
# the student gets 25%. (We take the 50% and square it, at the moment.)
# Set via partial_credit="list,close" or "close, list" or the like.
if str(self.tolerance).endswith('%'):
expanded_tolerance = str(partial_range * float(str(self.tolerance)[:-1])) + '%'
else:
expanded_tolerance = partial_range * float(self.tolerance)
if compare_with_tolerance(student_float, correct_float, self.tolerance):
is_correct = 'correct'
elif self.has_partial_credit is False:
pass
elif 'list' in self.credit_type:
for value in partial_answers:
if compare_with_tolerance(student_float, value, self.tolerance):
is_correct = 'partially-correct'
break
elif 'close' in self.credit_type:
if compare_with_tolerance(student_float, correct_float, expanded_tolerance):
is_correct = 'partially-correct'
break
elif compare_with_tolerance(student_float, value, expanded_tolerance):
is_correct = 'partially-correct'
partial_score = partial_score * partial_score
break
elif 'close' in self.credit_type:
if compare_with_tolerance(student_float, correct_float, expanded_tolerance):
is_correct = 'partially-correct'
# Reset self.additional_answer_index to -1 so that we always have a fresh index to look up.
self.additional_answer_index = -1
# Compare with additional answers.
if is_correct == 'incorrect':
temp_additional_answer_idx = 0
for additional_answer in self.additional_answers:
staff_answer = self.get_staff_ans(additional_answer)
if complex(student_float) == staff_answer:
is_correct = 'correct'
self.additional_answer_index = temp_additional_answer_idx
break
temp_additional_answer_idx += 1
if is_correct == 'partially-correct':
return CorrectMap(self.answer_id, is_correct, npoints=partial_score)
else:
return CorrectMap(self.answer_id, is_correct)
def compare_answer(self, ans1, ans2):
"""
Outside-facing function that lets us compare two numerical answers,
with this problem's tolerance.
"""
return compare_with_tolerance(
evaluator({}, {}, ans1),
evaluator({}, {}, ans2),
self.tolerance
)
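# For example (illustrative), with the default tolerance:
#   self.compare_answer('4+5/2^2', '5.25')   # -> True, both sides evaluate to 5.25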
def validate_answer(self, answer):
"""
Returns whether this answer is in a valid form.
"""
try:
evaluator(dict(), dict(), answer)
return True
except (StudentInputError, UndefinedVariable):
return False
def get_answers(self):
_ = self.capa_system.i18n.ugettext
# Example: "Answer: Answer_1 or Answer_2 or Answer_3".
separator = Text(' {b_start}{or_separator}{b_end} ').format(
# Translators: Separator used in NumericalResponse to display multiple answers.
or_separator=_('or'),
b_start=HTML('<b>'),
b_end=HTML('</b>'),
)
return {self.answer_id: separator.join([self.correct_answer] + self.additional_answers)}
def set_cmap_msg(self, student_answers, new_cmap, hint_type, hint_index):
"""
Sets feedback to correct hint node in correct map.
Arguments:
student_answers (dict): Dict containing student input.
new_cmap (dict): Dict containing correct map properties.
hint_type (str): Hint type, either `correcthint` or `additional_answer`
hint_index (int): Index of the hint node
"""
# Note: using self.id here, not the more typical self.answer_id
hint_nodes = self.xml.xpath('//numericalresponse[@id=$id]/' + hint_type, id=self.id)
if hint_nodes:
hint_node = hint_nodes[hint_index]
if hint_type == 'additional_answer':
hint_node = hint_nodes[hint_index].find('./correcthint')
new_cmap[self.answer_id]['msg'] += self.make_hint_div(
hint_node,
True,
[student_answers[self.answer_id]],
self.tags[0]
)
def get_extended_hints(self, student_answers, new_cmap):
"""
Extract numericalresponse extended hint, e.g.
<correcthint>Yes, 1+1 IS 2</correcthint>
"""
if self.answer_id in student_answers:
if new_cmap.cmap[self.answer_id]['correctness'] == 'correct': # if the grader liked the student's answer
# Answer is not an additional answer.
if self.additional_answer_index == -1:
self.set_cmap_msg(student_answers, new_cmap, 'correcthint', 0)
else:
self.set_cmap_msg(student_answers, new_cmap, 'additional_answer', self.additional_answer_index)
#-----------------------------------------------------------------------------
@registry.register
class StringResponse(LoncapaResponse):
r"""
This response type allows one or more answers.
Additional answers are added with the `additional_answer` tag.
If `regexp` is in the `type` attribute, then answers and hints are treated as regular expressions.
Examples:
<stringresponse answer="Michigan">
<textline size="20" />
</stringresponse >
<stringresponse answer="a1" type="ci regexp">
<additional_answer>d5</additional_answer>
<additional_answer answer="a3"><correcthint>a hint - new format</correcthint></additional_answer>
<textline size="20"/>
<hintgroup>
<stringhint answer="a0" type="ci" name="ha0" />
<stringhint answer="a4" type="ci" name="ha4" />
<stringhint answer="^\d" type="ci" name="re1" />
<hintpart on="ha0">
<startouttext />+1<endouttext />
</hintpart >
<hintpart on="ha4">
<startouttext />-1<endouttext />
</hintpart >
<hintpart on="re1">
<startouttext />Any number+5<endouttext />
</hintpart >
</hintgroup>
</stringresponse>
"""
human_name = _('Text Input')
tags = ['stringresponse']
hint_tag = 'stringhint'
allowed_inputfields = ['textline']
required_attributes = ['answer']
max_inputfields = 1
correct_answer = []
multi_device_support = True
def setup_response_backward(self):
self.correct_answer = [
contextualize_text(answer, self.context).strip() for answer in self.xml.get('answer').split('_or_')
]
def setup_response(self):
self.backward = '_or_' in self.xml.get('answer').lower()
self.regexp = False
self.case_insensitive = False
if self.xml.get('type') is not None:
self.regexp = 'regexp' in self.xml.get('type').lower().split(' ')
self.case_insensitive = 'ci' in self.xml.get('type').lower().split(' ')
# backward compatibility, can be removed in future, it is up to @Lyla Fisher.
if self.backward:
self.setup_response_backward()
return
# end of backward compatibility
# XML compatibility note: in 2015, additional_answer switched to having an 'answer' attribute.
# See make_xml_compatible in capa_problem which translates the old format.
correct_answers = (
[self.xml.get('answer')] +
[element.get('answer') for element in self.xml.findall('additional_answer')]
)
self.correct_answer = [contextualize_text(answer, self.context).strip() for answer in correct_answers]
def get_score(self, student_answers):
"""Grade a string response """
if self.answer_id not in student_answers:
correct = False
else:
student_answer = student_answers[self.answer_id].strip()
correct = self.check_string(self.correct_answer, student_answer)
return CorrectMap(self.answer_id, 'correct' if correct else 'incorrect')
def check_string_backward(self, expected, given):
if self.case_insensitive:
return given.lower() in [i.lower() for i in expected]
return given in expected
def get_extended_hints(self, student_answers, new_cmap):
"""
Find and install extended hints in new_cmap depending on the student answers.
StringResponse is probably the most complicated form we have.
The forms shown below are matched in the order given; the first match stops the matching.
<stringresponse answer="A" type="ci">
<correcthint>hint1</correcthint> <!-- hint for correct answer -->
<additional_answer answer="B">hint2</additional_answer> <!-- additional_answer with its own hint -->
<stringequalhint answer="C">hint3</stringequalhint> <!-- string matcher/hint for an incorrect answer -->
<regexphint answer="FG+">hint4</regexphint> <!-- regex matcher/hint for an incorrect answer -->
<textline size="20"/>
</stringresponse>
The "ci" and "regexp" options are inherited from the parent stringresponse as appropriate.
"""
if self.answer_id in student_answers:
student_answer = student_answers[self.answer_id]
# Note the atypical case of using self.id instead of self.answer_id
responses = self.xml.xpath('//stringresponse[@id=$id]', id=self.id)
if responses:
response = responses[0]
# First call the existing check_string to see if this is a right answer by that test.
# It handles the various "ci" "regexp" cases internally.
expected = response.get('answer').strip()
if self.check_string([expected], student_answer):
hint_node = response.find('./correcthint')
if hint_node is not None:
new_cmap[self.answer_id]['msg'] += self.make_hint_div(
hint_node,
True,
[student_answer],
self.tags[0]
)
return
# Then look for additional answer with an answer= attribute
for node in response.findall('./additional_answer'):
if self.match_hint_node(node, student_answer, self.regexp, self.case_insensitive):
hint_node = node.find('./correcthint')
new_cmap[self.answer_id]['msg'] += self.make_hint_div(
hint_node,
True,
[student_answer],
self.tags[0]
)
return
# stringequalhint and regexphint represent wrong answers
for hint_node in response.findall('./stringequalhint'):
if self.match_hint_node(hint_node, student_answer, False, self.case_insensitive):
new_cmap[self.answer_id]['msg'] += self.make_hint_div(
hint_node,
False,
[student_answer],
self.tags[0]
)
return
for hint_node in response.findall('./regexphint'):
if self.match_hint_node(hint_node, student_answer, True, self.case_insensitive):
new_cmap[self.answer_id]['msg'] += self.make_hint_div(
hint_node,
False,
[student_answer],
self.tags[0]
)
return
def match_hint_node(self, node, given, regex_mode, ci_mode):
"""
Given an xml extended hint node such as additional_answer or regexphint,
which contain an answer= attribute, returns True if the given student answer is a match.
The boolean arguments regex_mode and ci_mode control how the answer stored in
the question is treated for the comparison (analogously to check_string).
"""
answer = node.get('answer', '').strip()
if not answer:
return False
if regex_mode:
flags = 0
if ci_mode:
flags = re.IGNORECASE
try:
# We follow the check_string convention/exception, adding ^ and $
regex = re.compile('^' + answer + '$', flags=flags | re.UNICODE)
return re.search(regex, given)
except Exception: # pylint: disable=broad-except
return False
if ci_mode:
return answer.lower() == given.lower()
else:
return answer == given
def check_string(self, expected, given):
"""
Find given in expected.
If self.regexp is true, a regular expression search is used.
If self.case_insensitive is true, a case-insensitive search is used; otherwise the search is case-sensitive.
Spaces around values of attributes are stripped in XML parsing step.
Args:
expected: list.
given: str.
Returns: bool
Raises: `ResponseError` if it fails to compile regular expression.
Note: for old code, which supports _or_ separator, we add some backward compatibility handling.
Should be removed soon. When to remove it, is up to Lyla Fisher.
"""
# if given answer is empty.
if not given:
return False
_ = self.capa_system.i18n.ugettext
# backward compatibility, should be removed in future.
if self.backward:
return self.check_string_backward(expected, given)
# end of backward compatibility
if self.regexp: # regexp match
flags = re.IGNORECASE if self.case_insensitive else 0
try:
regexp = re.compile('^' + '|'.join(expected) + '$', flags=flags | re.UNICODE)
result = re.search(regexp, given)
except Exception as err:
msg = u'[courseware.capa.responsetypes.stringresponse] {error}: {message}'.format(
error=_('error'),
message=err.message
)
log.error(msg, exc_info=True)
raise ResponseError(msg)
return bool(result)
else: # string match
if self.case_insensitive:
return given.lower() in [i.lower() for i in expected]
else:
return given in expected
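# Illustrative behaviour of the matching above:
#   with self.regexp=False, self.case_insensitive=True:
#       check_string(['Michigan'], 'michigan')   -> True
#   with self.regexp=True (note the implicit ^...$ anchoring):
#       check_string(['colou?r'], 'color')       -> True
#       check_string(['colou?r'], 'discolor')    -> False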
def check_hint_condition(self, hxml_set, student_answers):
given = student_answers[self.answer_id].strip()
hints_to_show = []
for hxml in hxml_set:
name = hxml.get('name')
hinted_answer = contextualize_text(hxml.get('answer'), self.context).strip()
if self.check_string([hinted_answer], given):
hints_to_show.append(name)
log.debug('hints_to_show = %s', hints_to_show)
return hints_to_show
def get_answers(self):
_ = self.capa_system.i18n.ugettext
# Translators: Separator used in StringResponse to display multiple answers.
# Example: "Answer: Answer_1 or Answer_2 or Answer_3".
separator = u' <b>{}</b> '.format(_('or'))
return {self.answer_id: separator.join(self.correct_answer)}
#-----------------------------------------------------------------------------
@registry.register
class CustomResponse(LoncapaResponse):
"""
Custom response. The python code to be run should be in <answer>...</answer>
or in a <script>...</script>
"""
human_name = _('Custom Evaluated Script')
tags = ['customresponse']
allowed_inputfields = ['textline', 'textbox', 'crystallography',
'chemicalequationinput', 'vsepr_input',
'drag_and_drop_input', 'editamoleculeinput',
'designprotein2dinput', 'editageneinput',
'annotationinput', 'jsinput', 'formulaequationinput']
code = None
expect = None
# Standard amount for partial credit if not otherwise specified:
default_pc = 0.5
def setup_response(self):
xml = self.xml
# if <customresponse> has an "expect" (or "answer") attribute then save
# that
self.expect = contextualize_text(xml.get('expect') or xml.get('answer'), self.context)
log.debug('answer_ids=%s', self.answer_ids)
# the <answer>...</answer> stanza should be local to the current <customresponse>.
# So try looking there first.
self.code = None
answer = None
try:
answer = xml.xpath('//*[@id=$id]//answer', id=xml.get('id'))[0]
except IndexError:
# print "xml = ",etree.tostring(xml,pretty_print=True)
# if we have a "cfn" attribute then look for the function specified by cfn, in
# the problem context ie the comparison function is defined in the
# <script>...</script> stanza instead
cfn = xml.get('cfn')
if cfn:
log.debug("cfn = %s", cfn)
# This is a bit twisty. We used to grab the cfn function from
# the context, but now that we sandbox Python execution, we
# can't get functions from previous executions. So we make an
# actual function that will re-execute the original script,
# and invoke the function with the data needed.
def make_check_function(script_code, cfn):
def check_function(expect, ans, **kwargs):
extra_args = "".join(", {0}={0}".format(k) for k in kwargs)
code = (
script_code + "\n" +
"cfn_return = %s(expect, ans%s)\n" % (cfn, extra_args)
)
globals_dict = {
'expect': expect,
'ans': ans,
}
globals_dict.update(kwargs)
safe_exec.safe_exec(
code,
globals_dict,
python_path=self.context['python_path'],
extra_files=self.context['extra_files'],
slug=self.id,
random_seed=self.context['seed'],
unsafely=self.capa_system.can_execute_unsafe_code(),
)
return globals_dict['cfn_return']
return check_function
self.code = make_check_function(self.context['script_code'], cfn)
if not self.code:
if answer is None:
log.error("[courseware.capa.responsetypes.customresponse] missing"
" code checking script! id=%s", self.id)
self.code = ''
else:
answer_src = answer.get('src')
if answer_src is not None:
# TODO: this code seems not to be used any more since self.capa_system.filesystem doesn't exist.
self.code = self.capa_system.filesystem.open('src/' + answer_src).read()
else:
self.code = answer.text
def get_score(self, student_answers):
"""
student_answers is a dict with everything from request.POST, but with the first part
of each key removed (the string before the first "_").
"""
_ = self.capa_system.i18n.ugettext
log.debug('%s: student_answers=%s', unicode(self), student_answers)
# ordered list of answer id's
        # sort the responses on the basis of the problem's position number
# which can be found in the last place in the problem id. Then convert
# this number into an int, so that we sort on ints instead of strings
idset = sorted(self.answer_ids, key=lambda x: int(x.split("_")[-1]))
try:
# ordered list of answers
submission = [student_answers[k] for k in idset]
except Exception as err:
msg = u"[courseware.capa.responsetypes.customresponse] {message}\n idset = {idset}, error = {err}".format(
message=_("error getting student answer from {student_answers}").format(
student_answers=student_answers,
),
idset=idset,
err=err
)
log.error(
"[courseware.capa.responsetypes.customresponse] error getting"
" student answer from %s"
"\n idset = %s, error = %s",
student_answers, idset, err
)
raise Exception(msg)
# global variable in context which holds the Presentation MathML from dynamic math input
# ordered list of dynamath responses
dynamath = [student_answers.get(k + '_dynamath', None) for k in idset]
# if there is only one box, and it's empty, then don't evaluate
if len(idset) == 1 and not submission[0]:
# default to no error message on empty answer (to be consistent with other
# responsetypes) but allow author to still have the old behavior by setting
# empty_answer_err attribute
msg = (u'<span class="inline-error">{0}</span>'.format(_(u'No answer entered!'))
if self.xml.get('empty_answer_err') else '')
return CorrectMap(idset[0], 'incorrect', msg=msg)
# NOTE: correct = 'unknown' could be dangerous. Inputtypes such as textline are
# not expecting 'unknown's
correct = ['unknown'] * len(idset)
messages = [''] * len(idset)
overall_message = ""
# put these in the context of the check function evaluator
# note that this doesn't help the "cfn" version - only the exec version
self.context.update({
# my ID
'response_id': self.id,
# expected answer (if given as attribute)
'expect': self.expect,
# ordered list of student answers from entry boxes in our subtree
'submission': submission,
# ordered list of ID's of all entry boxes in our subtree
'idset': idset,
# ordered list of all javascript inputs in our subtree
'dynamath': dynamath,
# dict of student's responses, with keys being entry box IDs
'answers': student_answers,
# the list to be filled in by the check function
'correct': correct,
# the list of messages to be filled in by the check function
'messages': messages,
# a message that applies to the entire response
# instead of a particular input
'overall_message': overall_message,
# any options to be passed to the cfn
'options': self.xml.get('options'),
'testdat': 'hello world',
})
# Pass DEBUG to the check function.
self.context['debug'] = self.capa_system.DEBUG
# Run the check function
self.execute_check_function(idset, submission)
# build map giving "correct"ness of the answer(s)
correct = self.context['correct']
messages = self.context['messages']
overall_message = self.clean_message_html(self.context['overall_message'])
grade_decimals = self.context.get('grade_decimals')
correct_map = CorrectMap()
correct_map.set_overall_message(overall_message)
for k in range(len(idset)):
max_points = self.maxpoints[idset[k]]
if grade_decimals:
npoints = max_points * grade_decimals[k]
else:
if correct[k] == 'correct':
npoints = max_points
elif correct[k] == 'partially-correct':
npoints = max_points * self.default_pc
else:
npoints = 0
correct_map.set(idset[k], correct[k], msg=messages[k],
npoints=npoints)
return correct_map
def execute_check_function(self, idset, submission):
# exec the check function
if isinstance(self.code, basestring):
try:
safe_exec.safe_exec(
self.code,
self.context,
cache=self.capa_system.cache,
python_path=self.context['python_path'],
extra_files=self.context['extra_files'],
slug=self.id,
random_seed=self.context['seed'],
unsafely=self.capa_system.can_execute_unsafe_code(),
)
except Exception as err: # pylint: disable=broad-except
self._handle_exec_exception(err)
else:
# self.code is not a string; it's a function we created earlier.
# this is an interface to the Tutor2 check functions
tutor_cfn = self.code
answer_given = submission[0] if (len(idset) == 1) else submission
kwnames = self.xml.get("cfn_extra_args", "").split()
kwargs = {n: self.context.get(n) for n in kwnames}
log.debug(" submission = %s", submission)
try:
ret = tutor_cfn(self.expect, answer_given, **kwargs)
except Exception as err: # pylint: disable=broad-except
self._handle_exec_exception(err)
log.debug(
"[courseware.capa.responsetypes.customresponse.get_score] ret = %s",
ret
)
if isinstance(ret, dict):
# One kind of dictionary the check function can return has the
# form {'ok': BOOLEAN or STRING, 'msg': STRING, 'grade_decimal' (optional): FLOAT (between 0.0 and 1.0)}
# 'ok' will control the checkmark, while grade_decimal, if present, will scale
# the score the student receives on the response.
# If there are multiple inputs, they all get marked
# to the same correct/incorrect value
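                # Illustrative return value of this shape (values are hypothetical):
                #   {'ok': 'partial', 'msg': 'Close, but not quite.', 'grade_decimal': 0.5}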
if 'ok' in ret:
# Returning any falsy value or the "false" string for "ok" gives incorrect.
# Returning any string that includes "partial" for "ok" gives partial credit.
# Returning any other truthy value for "ok" gives correct
ok_val = str(ret['ok']).lower().strip() if bool(ret['ok']) else 'false'
if ok_val == 'false':
correct = 'incorrect'
elif 'partial' in ok_val:
correct = 'partially-correct'
else:
correct = 'correct'
correct = [correct] * len(idset) # All inputs share the same mark.
# old version, no partial credit:
# correct = ['correct' if ret['ok'] else 'incorrect'] * len(idset)
msg = ret.get('msg', None)
msg = self.clean_message_html(msg)
# If there is only one input, apply the message to that input
# Otherwise, apply the message to the whole problem
if len(idset) > 1:
self.context['overall_message'] = msg
else:
self.context['messages'][0] = msg
if 'grade_decimal' in ret:
decimal = float(ret['grade_decimal'])
else:
if correct[0] == 'correct':
decimal = 1.0
elif correct[0] == 'partially-correct':
decimal = self.default_pc
else:
decimal = 0.0
grade_decimals = [decimal] * len(idset)
self.context['grade_decimals'] = grade_decimals
# Another kind of dictionary the check function can return has
# the form:
# { 'overall_message': STRING,
# 'input_list': [
# {
# 'ok': BOOLEAN or STRING,
# 'msg': STRING,
# 'grade_decimal' (optional): FLOAT (between 0.0 and 1.0)
# },
# ...
# ]
# }
# 'ok' will control the checkmark, while grade_decimal, if present, will scale
# the score the student receives on the response.
#
# This allows the function to return an 'overall message'
# that applies to the entire problem, as well as correct/incorrect
# status, scaled grades, and messages for individual inputs
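            # Illustrative return value of this shape (values are hypothetical):
            #   {'overall_message': 'Almost there.',
            #    'input_list': [
            #        {'ok': True, 'msg': 'Good.'},
            #        {'ok': 'partial', 'msg': 'Check the sign.', 'grade_decimal': 0.5},
            #    ]}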
elif 'input_list' in ret:
overall_message = ret.get('overall_message', '')
input_list = ret['input_list']
correct = []
messages = []
grade_decimals = []
# Returning any falsy value or the "false" string for "ok" gives incorrect.
# Returning any string that includes "partial" for "ok" gives partial credit.
# Returning any other truthy value for "ok" gives correct
for input_dict in input_list:
if str(input_dict['ok']).lower().strip() == "false" or not input_dict['ok']:
correct.append('incorrect')
elif 'partial' in str(input_dict['ok']).lower().strip():
correct.append('partially-correct')
else:
correct.append('correct')
# old version, no partial credit
# correct.append('correct'
# if input_dict['ok'] else 'incorrect')
msg = (self.clean_message_html(input_dict['msg'])
if 'msg' in input_dict else None)
messages.append(msg)
if 'grade_decimal' in input_dict:
decimal = input_dict['grade_decimal']
else:
if str(input_dict['ok']).lower().strip() == 'true':
decimal = 1.0
elif 'partial' in str(input_dict['ok']).lower().strip():
decimal = self.default_pc
else:
decimal = 0.0
grade_decimals.append(decimal)
self.context['messages'] = messages
self.context['overall_message'] = overall_message
self.context['grade_decimals'] = grade_decimals
# Otherwise, we do not recognize the dictionary
# Raise an exception
else:
log.error(traceback.format_exc())
_ = self.capa_system.i18n.ugettext
raise ResponseError(
_("CustomResponse: check function returned an invalid dictionary!")
)
else:
# Returning any falsy value or the "false" string for "ok" gives incorrect.
# Returning any string that includes "partial" for "ok" gives partial credit.
# Returning any other truthy value for "ok" gives correct
if str(ret).lower().strip() == "false" or not bool(ret):
correct = 'incorrect'
elif 'partial' in str(ret).lower().strip():
correct = 'partially-correct'
else:
correct = 'correct'
correct = [correct] * len(idset)
# old version, no partial credit:
# correct = ['correct' if ret else 'incorrect'] * len(idset)
self.context['correct'] = correct
def clean_message_html(self, msg):
# If *msg* is an empty string, then the code below
# will return "</html>". To avoid this, we first check
# that *msg* is a non-empty string.
if msg:
# When we parse *msg* using etree, there needs to be a root
# element, so we wrap the *msg* text in <html> tags
msg = '<html>' + msg + '</html>'
# Replace < characters
            msg = msg.replace('&#60;', '&lt;')
# Use etree to prettify the HTML
msg = etree.tostring(fromstring_bs(msg, convertEntities=None),
pretty_print=True)
            msg = msg.replace('&#13;', '')
# Remove the <html> tags we introduced earlier, so we're
# left with just the prettified message markup
msg = re.sub('(?ms)<html>(.*)</html>', '\\1', msg)
# Strip leading and trailing whitespace
return msg.strip()
# If we start with an empty string, then return an empty string
else:
return ""
def get_answers(self):
"""
        Give the correct answer expected for this response.
        Use default_answer_map from the entry elements (e.g. textline)
        when this response has multiple entry objects.
        For simplicity, if an "expect" attribute was given by the content author,
        i.e. <customresponse expect="foo" ...>, then that value is used.
"""
if len(self.answer_ids) > 1:
return self.default_answer_map
if self.expect:
return {self.answer_ids[0]: self.expect}
return self.default_answer_map
def _handle_exec_exception(self, err):
"""
Handle an exception raised during the execution of
custom Python code.
Raises a ResponseError
"""
# Log the error if we are debugging
msg = 'Error occurred while evaluating CustomResponse'
log.warning(msg, exc_info=True)
# Notify student with a student input error
_, _, traceback_obj = sys.exc_info()
raise ResponseError(err.message, traceback_obj)
#-----------------------------------------------------------------------------
@registry.register
class SymbolicResponse(CustomResponse):
"""
Symbolic math response checking, using symmath library.
"""
human_name = _('Symbolic Math Input')
tags = ['symbolicresponse']
max_inputfields = 1
def setup_response(self):
# Symbolic response always uses symmath_check()
# If the XML did not specify this, then set it now
# Otherwise, we get an error from the superclass
self.xml.set('cfn', 'symmath_check')
# Let CustomResponse do its setup
super(SymbolicResponse, self).setup_response()
def execute_check_function(self, idset, submission):
from symmath import symmath_check
try:
# Since we have limited max_inputfields to 1,
# we can assume that there is only one submission
answer_given = submission[0]
ret = symmath_check(
self.expect, answer_given,
dynamath=self.context.get('dynamath'),
options=self.context.get('options'),
debug=self.context.get('debug'),
)
except Exception as err:
log.error("oops in SymbolicResponse (cfn) error %s", err)
log.error(traceback.format_exc())
_ = self.capa_system.i18n.ugettext
# Translators: 'SymbolicResponse' is a problem type and should not be translated.
msg = _(u"An error occurred with SymbolicResponse. The error was: {error_msg}").format(
error_msg=err,
)
raise Exception(msg)
self.context['messages'][0] = self.clean_message_html(ret['msg'])
self.context['correct'] = ['correct' if ret['ok'] else 'incorrect'] * len(idset)
#-----------------------------------------------------------------------------
## ScoreMessage named tuple ##
## valid: Flag indicating valid score_msg format (Boolean)
## correct: Correctness of submission (Boolean)
## points: Points to be assigned (numeric, can be float)
## msg: Message from grader to display to student (string)
ScoreMessage = namedtuple('ScoreMessage', ['valid', 'correct', 'points', 'msg'])
@registry.register
class CodeResponse(LoncapaResponse):
"""
Grade student code using an external queueing server, called 'xqueue'.
Expects 'xqueue' dict in LoncapaSystem with the following keys that are
needed by CodeResponse::
capa_system.xqueue = {
'interface': XQueueInterface object.
'construct_callback': Per-StudentModule callback URL constructor,
defaults to using 'score_update' as the correct dispatch (function).
'default_queuename': Default queue name to submit request (string).
}
External requests are only submitted for student submission grading, not
for getting reference answers.
"""
human_name = _('Code Input')
tags = ['coderesponse']
allowed_inputfields = ['textbox', 'filesubmission', 'matlabinput']
max_inputfields = 1
payload = None
initial_display = None
url = None
answer = None
queue_name = None
def setup_response(self):
"""
        Configure CodeResponse from XML. Supports both CodeResponse and ExternalResponse XML.
        TODO: Determine whether we are in synchronous or asynchronous (queued) mode.
"""
xml = self.xml
# TODO: XML can override external resource (grader/queue) URL
self.url = xml.get('url', None)
# We do not support xqueue within Studio.
if self.capa_system.xqueue is not None:
default_queuename = self.capa_system.xqueue['default_queuename']
else:
default_queuename = None
self.queue_name = xml.get('queuename', default_queuename)
# VS[compat]:
# Check if XML uses the ExternalResponse format or the generic
# CodeResponse format
codeparam = self.xml.find('codeparam')
assert codeparam is not None, "Unsupported old format! <coderesponse> without <codeparam>"
self._parse_coderesponse_xml(codeparam)
def _parse_coderesponse_xml(self, codeparam):
"""
Parse the new CodeResponse XML format. When successful, sets:
self.initial_display
self.answer (an answer to display to the student in the LMS)
self.payload
"""
grader_payload = codeparam.find('grader_payload')
grader_payload = grader_payload.text if grader_payload is not None else ''
self.payload = {
'grader_payload': grader_payload,
}
# matlab api key can be defined in course settings. if so, add it to the grader payload
api_key = getattr(self.capa_system, 'matlab_api_key', None)
if api_key and self.xml.find('matlabinput') is not None:
self.payload['token'] = api_key
self.payload['endpoint_version'] = "2"
self.payload['requestor_id'] = self.capa_system.anonymous_student_id
self.initial_display = find_with_default(
codeparam, 'initial_display', '')
_ = self.capa_system.i18n.ugettext
self.answer = find_with_default(codeparam, 'answer_display',
_(u'No answer provided.'))
def get_score(self, student_answers):
_ = self.capa_system.i18n.ugettext
try:
# Note that submission can be a file
submission = student_answers[self.answer_id]
except Exception as err:
log.error(
'Error in CodeResponse %s: cannot get student answer for %s;'
' student_answers=%s',
err, self.answer_id, convert_files_to_filenames(student_answers)
)
raise Exception(err)
# We do not support xqueue within Studio.
if self.capa_system.xqueue is None:
cmap = CorrectMap()
cmap.set(self.answer_id, queuestate=None,
msg=_(u'Error: No grader has been set up for this problem.'))
return cmap
# Prepare xqueue request
#------------------------------------------------------------
qinterface = self.capa_system.xqueue['interface']
qtime = datetime.strftime(datetime.now(UTC), xqueue_interface.dateformat)
anonymous_student_id = self.capa_system.anonymous_student_id
# Generate header
queuekey = xqueue_interface.make_hashkey(
str(self.capa_system.seed) + qtime + anonymous_student_id + self.answer_id
)
callback_url = self.capa_system.xqueue['construct_callback']()
xheader = xqueue_interface.make_xheader(
lms_callback_url=callback_url,
lms_key=queuekey,
queue_name=self.queue_name
)
# Generate body
if is_list_of_files(submission):
# TODO: Get S3 pointer from the Queue
self.context.update({'submission': ''})
else:
self.context.update({'submission': submission})
contents = self.payload.copy()
# Metadata related to the student submission revealed to the external
# grader
student_info = {
'anonymous_student_id': anonymous_student_id,
'submission_time': qtime,
'random_seed': self.context['seed'],
}
if getattr(self.capa_system, 'send_users_emailaddr_with_coderesponse', False) is True:
student_info.update({'student_email': self.capa_system.deanonymized_user_email})
contents.update({'student_info': json.dumps(student_info)})
# Submit request. When successful, 'msg' is the prior length of the
# queue
if is_list_of_files(submission):
# TODO: Is there any information we want to send here?
contents.update({'student_response': ''})
(error, msg) = qinterface.send_to_queue(header=xheader,
body=json.dumps(contents),
files_to_upload=submission)
else:
contents.update({'student_response': submission})
(error, msg) = qinterface.send_to_queue(header=xheader,
body=json.dumps(contents))
# State associated with the queueing request
queuestate = {'key': queuekey,
'time': qtime, }
cmap = CorrectMap()
if error:
_ = self.capa_system.i18n.ugettext
error_msg = _('Unable to deliver your submission to grader (Reason: {error_msg}).'
' Please try again later.').format(error_msg=msg)
cmap.set(self.answer_id, queuestate=None, msg=error_msg)
else:
# Queueing mechanism flags:
# 1) Backend: Non-null CorrectMap['queuestate'] indicates that
# the problem has been queued
# 2) Frontend: correctness='incomplete' eventually trickles down
# through inputtypes.textbox and .filesubmission to inform the
# browser to poll the LMS
cmap.set(self.answer_id, queuestate=queuestate,
correctness='incomplete', msg=msg)
return cmap
def update_score(self, score_msg, oldcmap, queuekey):
"""Updates the user's score based on the returned message from the grader."""
(valid_score_msg, correct, points, msg) = self._parse_score_msg(score_msg)
_ = self.capa_system.i18n.ugettext
dog_stats_api.increment(xqueue_interface.XQUEUE_METRIC_NAME, tags=[
'action:update_score',
'correct:{}'.format(correct)
])
dog_stats_api.histogram(xqueue_interface.XQUEUE_METRIC_NAME + '.update_score.points_earned', points)
if not valid_score_msg:
# Translators: 'grader' refers to the edX automatic code grader.
error_msg = _('Invalid grader reply. Please contact the course staff.')
oldcmap.set(self.answer_id, msg=error_msg)
return oldcmap
correctness = 'correct' if correct else 'incorrect'
# TODO: Find out how this is used elsewhere, if any
self.context['correct'] = correctness
# Replace 'oldcmap' with new grading results if queuekey matches. If queuekey
# does not match, we keep waiting for the score_msg whose key actually
# matches
if oldcmap.is_right_queuekey(self.answer_id, queuekey):
# Sanity check on returned points
if points < 0:
points = 0
# Queuestate is consumed
oldcmap.set(
self.answer_id, npoints=points, correctness=correctness,
                msg=msg.replace('&nbsp;', '&#160;'), queuestate=None)
else:
log.debug(
'CodeResponse: queuekey %s does not match for answer_id=%s.',
queuekey,
self.answer_id
)
return oldcmap
def get_answers(self):
anshtml = '<span class="code-answer"><pre><code>%s</code></pre></span>' % self.answer
return {self.answer_id: anshtml}
def get_initial_display(self):
"""
The course author can specify an initial display
        to be displayed in the code response box.
"""
return {self.answer_id: self.initial_display}
def _parse_score_msg(self, score_msg):
"""
Grader reply is a JSON-dump of the following dict
{ 'correct': True/False,
'score': Numeric value (floating point is okay) to assign to answer
'msg': grader_msg }
Returns (valid_score_msg, correct, score, msg):
valid_score_msg: Flag indicating valid score_msg format (Boolean)
correct: Correctness of submission (Boolean)
score: Points to be assigned (numeric, can be float)
msg: Message from grader to display to student (string)
"""
fail = (False, False, 0, '')
try:
score_result = json.loads(score_msg)
except (TypeError, ValueError):
log.error("External grader message should be a JSON-serialized dict."
" Received score_msg = %s", score_msg)
return fail
if not isinstance(score_result, dict):
log.error("External grader message should be a JSON-serialized dict."
" Received score_result = %s", score_result)
return fail
for tag in ['correct', 'score', 'msg']:
if tag not in score_result:
log.error("External grader message is missing one or more required"
" tags: 'correct', 'score', 'msg'")
return fail
# Next, we need to check that the contents of the external grader message is safe for the LMS.
# 1) Make sure that the message is valid XML (proper opening/closing tags)
# 2) If it is not valid XML, make sure it is valid HTML.
# Note: html5lib parser will try to repair any broken HTML
# For example: <aaa></bbb> will become <aaa/>.
msg = score_result['msg']
try:
etree.fromstring(msg)
except etree.XMLSyntaxError as _err:
# If `html` contains attrs with no values, like `controls` in <audio controls src='smth'/>,
            # the XML parser will raise an exception, so we fall back to the html5lib parser,
            # which will set empty "" values for such attrs.
try:
parsed = html5lib.parseFragment(msg, treebuilder='lxml', namespaceHTMLElements=False)
except ValueError:
# the parsed message might contain strings that are not
# xml compatible, in which case, throw the error message
parsed = False
if not parsed:
log.error(
"Unable to parse external grader message as valid"
" XML: score_msg['msg']=%s",
msg,
)
return fail
return (True, score_result['correct'], score_result['score'], msg)
#-----------------------------------------------------------------------------
@registry.register
class ExternalResponse(LoncapaResponse):
"""
    Grade the student's input using an external server.
Typically used by coding problems.
"""
human_name = _('External Grader')
tags = ['externalresponse']
allowed_inputfields = ['textline', 'textbox']
awdmap = {
'EXACT_ANS': 'correct', # TODO: handle other loncapa responses
'WRONG_FORMAT': 'incorrect',
}
def __init__(self, *args, **kwargs):
self.url = ''
self.tests = []
self.code = ''
super(ExternalResponse, self).__init__(*args, **kwargs)
def setup_response(self):
xml = self.xml
# FIXME - hardcoded URL
self.url = xml.get('url') or "http://qisx.mit.edu:8889/pyloncapa"
answer = xml.find('answer')
if answer is not None:
answer_src = answer.get('src')
if answer_src is not None:
# TODO: this code seems not to be used any more since self.capa_system.filesystem doesn't exist.
self.code = self.capa_system.filesystem.open('src/' + answer_src).read()
else:
self.code = answer.text
else:
# no <answer> stanza; get code from <script>
self.code = self.context['script_code']
if not self.code:
msg = '%s: Missing answer script code for externalresponse' % unicode(
self)
msg += "\nSee XML source line %s" % getattr(
self.xml, 'sourceline', '<unavailable>')
raise LoncapaProblemError(msg)
self.tests = xml.get('tests')
def do_external_request(self, cmd, extra_payload):
"""
Perform HTTP request / post to external server.
cmd = remote command to perform (str)
extra_payload = dict of extra stuff to post.
Return XML tree of response (from response body)
"""
xmlstr = etree.tostring(self.xml, pretty_print=True)
payload = {
'xml': xmlstr,
'edX_cmd': cmd,
'edX_tests': self.tests,
'processor': self.code,
}
payload.update(extra_payload)
try:
# call external server. TODO: synchronous call, can block for a
# long time
req = requests.post(self.url, data=payload)
except Exception as err:
msg = 'Error {0} - cannot connect to external server url={1}'.format(err, self.url)
log.error(msg)
raise Exception(msg)
if self.capa_system.DEBUG:
log.info('response = %s', req.text)
if (not req.text) or (not req.text.strip()):
raise Exception(
'Error: no response from external server url=%s' % self.url)
try:
# response is XML; parse it
rxml = etree.fromstring(req.text)
except Exception as err:
msg = 'Error {0} - cannot parse response from external server req.text={1}'.format(err, req.text)
log.error(msg)
raise Exception(msg)
return rxml
def get_score(self, student_answers):
idset = sorted(self.answer_ids)
cmap = CorrectMap()
try:
submission = [student_answers[k] for k in idset]
except Exception as err:
log.error(
'Error %s: cannot get student answer for %s; student_answers=%s',
err,
self.answer_ids,
student_answers
)
raise Exception(err)
self.context.update({'submission': submission})
extra_payload = {'edX_student_response': json.dumps(submission)}
try:
rxml = self.do_external_request('get_score', extra_payload)
except Exception as err: # pylint: disable=broad-except
log.error('Error %s', err)
if self.capa_system.DEBUG:
cmap.set_dict(dict(zip(sorted(
self.answer_ids), ['incorrect'] * len(idset))))
cmap.set_property(
self.answer_ids[0], 'msg',
'<span class="inline-error">%s</span>' % str(err).replace('<', '<'))
return cmap
awd = rxml.find('awarddetail').text
self.context['correct'] = ['correct']
if awd in self.awdmap:
self.context['correct'][0] = self.awdmap[awd]
# create CorrectMap
for key in idset:
idx = idset.index(key)
            msg = rxml.find('message').text.replace(
                '&nbsp;', '&#160;') if idx == 0 else None
cmap.set(key, self.context['correct'][idx], msg=msg)
return cmap
def get_answers(self):
"""
Use external server to get expected answers
"""
try:
rxml = self.do_external_request('get_answers', {})
exans = json.loads(rxml.find('expected').text)
except Exception as err: # pylint: disable=broad-except
log.error('Error %s', err)
if self.capa_system.DEBUG:
msg = '<span class="inline-error">%s</span>' % str(
                    err).replace('<', '&lt;')
exans = [''] * len(self.answer_ids)
exans[0] = msg
if not len(exans) == len(self.answer_ids):
log.error('Expected %s answers from external server, only got %s!',
len(self.answer_ids), len(exans))
raise Exception('Short response from external server')
return dict(zip(self.answer_ids, exans))
#-----------------------------------------------------------------------------
@registry.register
class FormulaResponse(LoncapaResponse):
"""
Checking of symbolic math response using numerical sampling.
"""
human_name = _('Math Expression Input')
tags = ['formularesponse']
hint_tag = 'formulahint'
allowed_inputfields = ['textline', 'formulaequationinput']
required_attributes = ['answer', 'samples']
max_inputfields = 1
multi_device_support = True
def __init__(self, *args, **kwargs):
self.correct_answer = ''
self.samples = ''
self.tolerance = default_tolerance
self.case_sensitive = False
super(FormulaResponse, self).__init__(*args, **kwargs)
def setup_response(self):
xml = self.xml
context = self.context
self.correct_answer = contextualize_text(xml.get('answer'), context)
self.samples = contextualize_text(xml.get('samples'), context)
# Find the tolerance
tolerance_xml = xml.xpath(
'//*[@id=$id]//responseparam[@type="tolerance"]/@default',
id=xml.get('id')
)
if tolerance_xml: # If it isn't an empty list...
self.tolerance = contextualize_text(tolerance_xml[0], context)
types = xml.get('type')
if types is None:
typeslist = []
else:
typeslist = types.split(',')
if 'ci' in typeslist:
# Case insensitive
self.case_sensitive = False
elif 'cs' in typeslist:
# Case sensitive
self.case_sensitive = True
else:
# Default
self.case_sensitive = False
def get_score(self, student_answers):
given = student_answers[self.answer_id]
correctness = self.check_formula(
self.correct_answer,
given,
self.samples
)
return CorrectMap(self.answer_id, correctness)
def tupleize_answers(self, answer, var_dict_list):
"""
Takes in an answer and a list of dictionaries mapping variables to values.
Each dictionary represents a test case for the answer.
Returns a tuple of formula evaluation results.
"""
_ = self.capa_system.i18n.ugettext
out = []
for var_dict in var_dict_list:
try:
out.append(evaluator(
var_dict,
dict(),
answer,
case_sensitive=self.case_sensitive,
))
except UndefinedVariable as err:
log.debug(
'formularesponse: undefined variable in formula=%s',
cgi.escape(answer)
)
raise StudentInputError(
_("Invalid input: {bad_input} not permitted in answer.").format(bad_input=err.message)
)
except ValueError as err:
if 'factorial' in err.message:
# This is thrown when fact() or factorial() is used in a formularesponse answer
# that tests on negative and/or non-integer inputs
# err.message will be: `factorial() only accepts integral values` or
# `factorial() not defined for negative values`
log.debug(
('formularesponse: factorial function used in response '
'that tests negative and/or non-integer inputs. '
'Provided answer was: %s'),
cgi.escape(answer)
)
raise StudentInputError(
_("factorial function not permitted in answer "
"for this problem. Provided answer was: "
"{bad_input}").format(bad_input=cgi.escape(answer))
)
# If non-factorial related ValueError thrown, handle it the same as any other Exception
log.debug('formularesponse: error %s in formula', err)
raise StudentInputError(
_("Invalid input: Could not parse '{bad_input}' as a formula.").format(
bad_input=cgi.escape(answer)
)
)
except Exception as err:
# traceback.print_exc()
log.debug('formularesponse: error %s in formula', err)
raise StudentInputError(
_("Invalid input: Could not parse '{bad_input}' as a formula").format(
bad_input=cgi.escape(answer)
)
)
return out
def randomize_variables(self, samples):
"""
Returns a list of dictionaries mapping variables to random values in range,
as expected by tupleize_answers.
"""
variables = samples.split('@')[0].split(',')
numsamples = int(samples.split('@')[1].split('#')[1])
sranges = zip(*map(lambda x: map(float, x.split(",")),
samples.split('@')[1].split('#')[0].split(':')))
ranges = dict(zip(variables, sranges))
out = []
for _ in range(numsamples):
var_dict = {}
# ranges give numerical ranges for testing
for var in ranges:
# TODO: allow specified ranges (i.e. integers and complex numbers) for random variables
value = random.uniform(*ranges[var])
var_dict[str(var)] = value
out.append(var_dict)
return out
def check_formula(self, expected, given, samples):
"""
Given an expected answer string, a given (student-produced) answer
string, and a samples string, return whether the given answer is
"correct" or "incorrect".
"""
var_dict_list = self.randomize_variables(samples)
student_result = self.tupleize_answers(given, var_dict_list)
instructor_result = self.tupleize_answers(expected, var_dict_list)
correct = all(compare_with_tolerance(student, instructor, self.tolerance)
for student, instructor in zip(student_result, instructor_result))
if correct:
return "correct"
else:
return "incorrect"
def compare_answer(self, ans1, ans2):
"""
An external interface for comparing whether a and b are equal.
"""
internal_result = self.check_formula(ans1, ans2, self.samples)
return internal_result == "correct"
def validate_answer(self, answer):
"""
Returns whether this answer is in a valid form.
"""
var_dict_list = self.randomize_variables(self.samples)
try:
self.tupleize_answers(answer, var_dict_list)
return True
except StudentInputError:
return False
def strip_dict(self, inp_d):
"""
Takes a dict. Returns an identical dict, with all non-word
        keys and all non-numeric values stripped out. All remaining values are
        converted to complex numbers. Used so we can safely use Python contexts.
"""
inp_d = dict([(k, numpy.complex(inp_d[k]))
for k in inp_d if isinstance(k, str) and
k.isalnum() and
isinstance(inp_d[k], numbers.Number)])
return inp_d
def check_hint_condition(self, hxml_set, student_answers):
given = student_answers[self.answer_id]
hints_to_show = []
for hxml in hxml_set:
samples = hxml.get('samples')
name = hxml.get('name')
correct_answer = contextualize_text(
hxml.get('answer'), self.context)
# pylint: disable=broad-except
try:
correctness = self.check_formula(
correct_answer,
given,
samples
)
except Exception:
correctness = 'incorrect'
if correctness == 'correct':
hints_to_show.append(name)
log.debug('hints_to_show = %s', hints_to_show)
return hints_to_show
def get_answers(self):
return {self.answer_id: self.correct_answer}
#-----------------------------------------------------------------------------
@registry.register
class SchematicResponse(LoncapaResponse):
"""
Circuit schematic response type.
"""
human_name = _('Circuit Schematic Builder')
tags = ['schematicresponse']
allowed_inputfields = ['schematic']
def __init__(self, *args, **kwargs):
self.code = ''
super(SchematicResponse, self).__init__(*args, **kwargs)
def setup_response(self):
xml = self.xml
answer = xml.xpath('//*[@id=$id]//answer', id=xml.get('id'))[0]
answer_src = answer.get('src')
if answer_src is not None:
# Untested; never used
self.code = self.capa_system.filestore.open('src/' + answer_src).read()
else:
self.code = answer.text
def get_score(self, student_answers):
submission = [
json.loads(student_answers[k]) for k in sorted(self.answer_ids)
]
self.context.update({'submission': submission})
try:
safe_exec.safe_exec(
self.code,
self.context,
cache=self.capa_system.cache,
python_path=self.context['python_path'],
extra_files=self.context['extra_files'],
slug=self.id,
random_seed=self.context['seed'],
unsafely=self.capa_system.can_execute_unsafe_code(),
)
except Exception as err:
_ = self.capa_system.i18n.ugettext
# Translators: 'SchematicResponse' is a problem type and should not be translated.
msg = _('Error in evaluating SchematicResponse. The error was: {error_msg}').format(error_msg=err)
raise ResponseError(msg)
cmap = CorrectMap()
cmap.set_dict(dict(zip(sorted(self.answer_ids), self.context['correct'])))
return cmap
def get_answers(self):
# use answers provided in input elements
return self.default_answer_map
#-----------------------------------------------------------------------------
@registry.register
class ImageResponse(LoncapaResponse):
"""
Handle student response for image input: the input is a click on an image,
which produces an [x,y] coordinate pair. The click is correct if it falls
within a region specified. This region is a union of rectangles.
Lon-CAPA requires that each <imageresponse> has a <foilgroup> inside it.
That doesn't make sense to me (Ike). Instead, let's have it such that
<imageresponse> should contain one or more <imageinput> stanzas.
Each <imageinput> should specify a rectangle(s) or region(s), given as an
attribute, defining the correct answer.
<imageinput src="/static/images/Lecture2/S2_p04.png" width="811" height="610"
rectangle="(10,10)-(20,30);(12,12)-(40,60)"
regions="[[[10,10], [20,30], [40, 10]], [[100,100], [120,130], [110,150]]]"/>
    Regions is a list of lists [region1, region2, region3, ...] where regionN
    is an unordered list of points: [[1,1], [100,100], [50,50], [20, 70]].
If there is only one region in the list, simpler notation can be used:
regions="[[10,10], [30,30], [10, 30], [30, 10]]" (without explicitly
setting outer list)
Returns:
True, if click is inside any region or rectangle. Otherwise False.
"""
human_name = _('Image Mapped Input')
tags = ['imageresponse']
allowed_inputfields = ['imageinput']
def __init__(self, *args, **kwargs):
self.ielements = []
super(ImageResponse, self).__init__(*args, **kwargs)
def setup_response(self):
self.ielements = self.inputfields
self.answer_ids = [ie.get('id') for ie in self.ielements]
def get_score(self, student_answers):
_ = self.capa_system.i18n.ugettext
correct_map = CorrectMap()
expectedset = self.get_mapped_answers()
for aid in self.answer_ids: # loop through IDs of <imageinput>
# Fields in our stanza
given = student_answers[aid] # This should be a string of the form '[x,y]'
correct_map.set(aid, 'incorrect')
if not given: # No answer to parse. Mark as incorrect and move on
continue
# Parse given answer
acoords = re.match(r'\[([0-9]+),([0-9]+)]', given.strip().replace(' ', ''))
if not acoords:
msg = _('error grading {image_input_id} (input={user_input})').format(
image_input_id=aid,
user_input=given
)
raise Exception('[capamodule.capa.responsetypes.imageinput] ' + msg)
(ans_x, ans_y) = [int(x) for x in acoords.groups()]
rectangles, regions = expectedset
if rectangles[aid]: # Rectangles part - for backward compatibility
# Check whether given point lies in any of the solution
# rectangles
solution_rectangles = rectangles[aid].split(';')
for solution_rectangle in solution_rectangles:
# parse expected answer
# TODO: Compile regexp on file load
sr_coords = re.match(
r'[\(\[]([0-9]+),([0-9]+)[\)\]]-[\(\[]([0-9]+),([0-9]+)[\)\]]',
solution_rectangle.strip().replace(' ', ''))
if not sr_coords:
# Translators: {sr_coords} are the coordinates of a rectangle
msg = _('Error in problem specification! Cannot parse rectangle in {sr_coords}').format(
sr_coords=etree.tostring(self.ielements[aid], pretty_print=True)
)
raise Exception('[capamodule.capa.responsetypes.imageinput] ' + msg)
(llx, lly, urx, ury) = [int(x) for x in sr_coords.groups()]
# answer is correct if (x,y) is within the specified
# rectangle
if (llx <= ans_x <= urx) and (lly <= ans_y <= ury):
correct_map.set(aid, 'correct')
break
if correct_map[aid]['correctness'] != 'correct' and regions[aid]:
parsed_region = json.loads(regions[aid])
if parsed_region:
if not isinstance(parsed_region[0][0], list):
# we have [[1,2],[3,4],[5,6]] - single region
# instead of [[[1,2],[3,4],[5,6], [[1,2],[3,4],[5,6]]]
# or [[[1,2],[3,4],[5,6]]] - multiple regions syntax
parsed_region = [parsed_region]
for region in parsed_region:
polygon = MultiPoint(region).convex_hull
if (polygon.type == 'Polygon' and
polygon.contains(Point(ans_x, ans_y))):
correct_map.set(aid, 'correct')
break
return correct_map
def get_mapped_answers(self):
"""
Returns the internal representation of the answers
Input:
None
Returns:
tuple (dict, dict) -
rectangles (dict) - a map of inputs to the defined rectangle for that input
regions (dict) - a map of inputs to the defined region for that input
"""
answers = (
dict([(ie.get('id'), ie.get(
'rectangle')) for ie in self.ielements]),
dict([(ie.get('id'), ie.get('regions')) for ie in self.ielements]))
return answers
def get_answers(self):
"""
Returns the external representation of the answers
Input:
None
Returns:
dict (str, (str, str)) - a map of inputs to a tuple of their rectangle
and their regions
"""
answers = {}
for ielt in self.ielements:
ie_id = ielt.get('id')
answers[ie_id] = {'rectangle': ielt.get('rectangle'), 'regions': ielt.get('regions')}
return answers
#-----------------------------------------------------------------------------
@registry.register
class AnnotationResponse(LoncapaResponse):
"""
Checking of annotation responses.
The response contains both a comment (student commentary) and an option (student tag).
Only the tag is currently graded. Answers may be incorrect, partially correct, or correct.
"""
human_name = _('Annotation Input')
tags = ['annotationresponse']
allowed_inputfields = ['annotationinput']
max_inputfields = 1
default_scoring = {'incorrect': 0, 'partially-correct': 1, 'correct': 2}
def __init__(self, *args, **kwargs):
self.scoring_map = {}
self.answer_map = {}
super(AnnotationResponse, self).__init__(*args, **kwargs)
def setup_response(self):
self.scoring_map = self._get_scoring_map()
self.answer_map = self._get_answer_map()
self.maxpoints = self._get_max_points()
def get_score(self, student_answers):
"""
Returns a CorrectMap for the student answer, which may include
partially correct answers.
"""
student_answer = student_answers[self.answer_id]
student_option = self._get_submitted_option_id(student_answer)
scoring = self.scoring_map[self.answer_id]
        is_valid = student_option is not None and student_option in scoring
(correctness, points) = ('incorrect', None)
if is_valid:
correctness = scoring[student_option]['correctness']
points = scoring[student_option]['points']
return CorrectMap(self.answer_id, correctness=correctness, npoints=points)
def get_answers(self):
return self.answer_map
def _get_scoring_map(self):
"""Returns a dict of option->scoring for each input."""
scoring = self.default_scoring
choices = dict([(choice, choice) for choice in scoring])
scoring_map = {}
for inputfield in self.inputfields:
option_scoring = dict([(
option['id'],
{
'correctness': choices.get(option['choice']),
'points': scoring.get(option['choice'])
}
) for option in self._find_options(inputfield)])
scoring_map[inputfield.get('id')] = option_scoring
return scoring_map
def _get_answer_map(self):
"""Returns a dict of answers for each input."""
answer_map = {}
for inputfield in self.inputfields:
correct_option = self._find_option_with_choice(
inputfield, 'correct')
if correct_option is not None:
input_id = inputfield.get('id')
answer_map[input_id] = correct_option.get('description')
return answer_map
def _get_max_points(self):
"""Returns a dict of the max points for each input: input id -> maxpoints."""
scoring = self.default_scoring
correct_points = scoring.get('correct')
return dict([(inputfield.get('id'), correct_points) for inputfield in self.inputfields])
def _find_options(self, inputfield):
"""Returns an array of dicts where each dict represents an option. """
elements = inputfield.findall('./options/option')
return [
{
'id': index,
'description': option.text,
'choice': option.get('choice')
} for (index, option) in enumerate(elements)
]
def _find_option_with_choice(self, inputfield, choice):
"""Returns the option with the given choice value, otherwise None. """
for option in self._find_options(inputfield):
if option['choice'] == choice:
return option
def _unpack(self, json_value):
"""Unpacks a student response value submitted as JSON."""
json_d = json.loads(json_value)
if not isinstance(json_d, dict):
json_d = {}
comment_value = json_d.get('comment', '')
        if not isinstance(comment_value, basestring):
comment_value = ''
        options_value = json_d.get('options', [])

# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Bibauthorid Web Interface Logic and URL handler. """
# pylint: disable=W0105
# pylint: disable=C0301
# pylint: disable=W0613
from cgi import escape
from pprint import pformat
from operator import itemgetter
import re
try:
from invenio.jsonutils import json, json_unicode_to_utf8, CFG_JSON_AVAILABLE
except ImportError:
CFG_JSON_AVAILABLE = False
json = None
from invenio.bibauthorid_webapi import add_cname_to_hepname_record
from invenio.bibauthorid_webapi import create_new_person
from invenio.config import CFG_SITE_URL, CFG_BASE_URL
from invenio.bibauthorid_config import AID_ENABLED, PERSON_SEARCH_RESULTS_SHOW_PAPERS_PERSON_LIMIT, \
BIBAUTHORID_UI_SKIP_ARXIV_STUB_PAGE, VALID_EXPORT_FILTERS, PERSONS_PER_PAGE, \
MAX_NUM_SHOW_PAPERS, BIBAUTHORID_CFG_SITE_NAME, CFG_BIBAUTHORID_ENABLED
from invenio.config import CFG_SITE_LANG, CFG_SITE_URL, CFG_INSPIRE_SITE, CFG_SITE_SECURE_URL
from invenio.bibauthorid_name_utils import most_relevant_name, clean_string
from invenio.webpage import page, pageheaderonly, pagefooteronly
from invenio.messages import gettext_set_language # , wash_language
from invenio.template import load
from invenio.webinterface_handler import wash_urlargd, WebInterfaceDirectory
from invenio.session import get_session
from invenio.urlutils import redirect_to_url, get_canonical_and_alternates_urls
from invenio.webuser import (getUid,
page_not_authorized,
collect_user_info,
set_user_preferences,
get_user_preferences,
email_valid_p,
emailUnique,
get_email_from_username,
get_uid_from_email,
isGuestUser)
from invenio.access_control_admin import acc_get_user_roles
from invenio.search_engine import perform_request_search
from invenio.search_engine_utils import get_fieldvalues
from invenio.bibauthorid_config import CREATE_NEW_PERSON
from invenio.bibsched import bibsched_task_finished_successfully, \
bibsched_task_finished_with_error, bibsched_task_running, bibsched_task_waiting, \
UnknownBibschedStatus
import invenio.webinterface_handler_config as apache
import invenio.webauthorprofile_interface as webauthorapi
import invenio.bibauthorid_webapi as webapi
from invenio.bibauthorid_general_utils import get_title_of_doi, get_title_of_arxiv_pubid, is_valid_orcid
from invenio.bibauthorid_backinterface import update_external_ids_of_authors, get_orcid_id_of_author, \
get_validated_request_tickets_for_author, get_title_of_paper, get_claimed_papers_of_author, \
get_free_author_id
from invenio.bibauthorid_dbinterface import defaultdict, remove_arxiv_papers_of_author, \
get_author_by_canonical_name, get_token, set_token, remove_rtid_from_ticket
from invenio.orcidutils import get_dois_from_orcid, get_dois_from_orcid_using_pid
from invenio.bibauthorid_webauthorprofileinterface import is_valid_canonical_id, get_person_id_from_canonical_id, \
get_person_redirect_link, author_has_papers
from invenio.bibauthorid_templates import WebProfileMenu, WebProfilePage
from invenio.bibauthorid_general_utils import get_inspire_record_url
from invenio.bibcatalog import BIBCATALOG_SYSTEM
# Imports related to hepnames update form
from invenio.bibedit_utils import get_bibrecord
from invenio.bibrecord import record_get_field_value, record_get_field_values, \
record_get_field_instances, field_get_subfield_values
from invenio.bibauthorid_name_utils import split_name_parts
from invenio.orcidutils import push_orcid_papers
TEMPLATE = load('bibauthorid')
class WebInterfaceBibAuthorIDClaimPages(WebInterfaceDirectory):
'''
Handles /author/claim pages and AJAX requests.
Supplies the methods:
/author/claim/<string>
/author/claim/action
/author/claim/claimstub
/author/claim/export
/author/claim/merge_profiles_ajax
/author/claim/search_box_ajax
/author/claim/tickets_admin
/author/claim/search
'''
_exports = ['',
'action',
'claimstub',
'export',
'merge_profiles_ajax',
'search_box_ajax',
'tickets_admin'
]
def _lookup(self, component, path):
'''
This handler parses dynamic URLs:
- /author/profile/1332 shows the page of author with id: 1332
- /author/profile/100:5522,1431 shows the page of the author
identified by the bibrefrec: '100:5522,1431'
'''
        if component not in self._exports:
return WebInterfaceBibAuthorIDClaimPages(component), path
def _is_profile_owner(self, pid):
return self.person_id == int(pid)
def _is_admin(self, pinfo):
return pinfo['ulevel'] == 'admin'
def __init__(self, identifier=None):
'''
Constructor of the web interface.
@param identifier: identifier of an author. Can be one of:
- an author id: e.g. "14"
- a canonical id: e.g. "J.R.Ellis.1"
- a bibrefrec: e.g. "100:1442,155"
@type identifier: str
'''
        self.person_id = -1  # -1 is not a valid author identifier
if identifier is None or not isinstance(identifier, str):
return
# check if it's a canonical id: e.g. "J.R.Ellis.1"
pid = int(webapi.get_person_id_from_canonical_id(identifier))
if pid >= 0:
self.person_id = pid
return
# check if it's an author id: e.g. "14"
try:
self.person_id = int(identifier)
return
except ValueError:
pass
# check if it's a bibrefrec: e.g. "100:1442,155"
if webapi.is_valid_bibref(identifier):
pid = int(webapi.get_person_id_from_paper(identifier))
if pid >= 0:
self.person_id = pid
return
def __call__(self, req, form):
'''
Serve the main person page.
Will use the object's person id to get a person's information.
@param req: apache request object
@type req: apache request object
@param form: POST/GET variables of the request
@type form: dict
@return: a full page formatted in HTML
@rtype: str
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ulevel = pinfo['ulevel']
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'open_claim': (str, None),
'ticketid': (int, -1),
'verbose': (int, 0)})
debug = "verbose" in argd and argd["verbose"] > 0
ln = argd['ln']
req.argd = argd # needed for perform_req_search
if self.person_id < 0:
return redirect_to_url(req, '%s/author/search' % (CFG_SITE_URL))
no_access = self._page_access_permission_wall(req, [self.person_id])
if no_access:
return no_access
pinfo['claim_in_process'] = True
user_info = collect_user_info(req)
user_info['precached_viewclaimlink'] = pinfo['claim_in_process']
session.dirty = True
if self.person_id != -1:
pinfo['claimpaper_admin_last_viewed_pid'] = self.person_id
rt_ticket_id = argd['ticketid']
if rt_ticket_id != -1:
pinfo["admin_requested_ticket_id"] = rt_ticket_id
session.dirty = True
# Create menu and page using templates
cname = webapi.get_canonical_id_from_person_id(self.person_id)
menu = WebProfileMenu(str(cname), "claim", ln, self._is_profile_owner(pinfo['pid']), self._is_admin(pinfo))
profile_page = WebProfilePage("claim", webapi.get_longest_name_from_pid(self.person_id))
profile_page.add_profile_menu(menu)
full_name = webapi.get_longest_name_from_pid(self.person_id)
page_title = '%s - Publications Management' % full_name
guest_prompt = 'true'
if not CFG_INSPIRE_SITE:
guest_prompt = 'false'
if 'prompt_shown' not in session:
session['prompt_shown'] = False
if session['prompt_shown']:
guest_prompt = 'false'
else:
session['prompt_shown'] = True
session.dirty = True
profile_page.add_bootstrapped_data(json.dumps({
"backbone": """
(function(ticketbox) {
var app = ticketbox.app;
app.userops.set(%s);
app.bodyModel.set({userLevel: "%s", guestPrompt: %s});
})(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel, guest_prompt)
}))
if debug:
profile_page.add_debug_info(session)
# body = self._generate_optional_menu(ulevel, req, form)
content = self._generate_tabs(ulevel, req)
content += self._generate_footer(ulevel)
content = content.decode('utf-8', 'strict')
webapi.history_log_visit(req, 'claim', pid=self.person_id)
return page(title=page_title,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=profile_page.get_wrapped_body("generic", {'html': content}).encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def _page_access_permission_wall(self, req, req_pid=None, req_level=None):
'''
Display an error page if user not authorized to use the interface.
@param req: Apache Request Object for session management
@type req: Apache Request Object
@param req_pid: Requested person id
@type req_pid: int
@param req_level: Request level required for the page
@type req_level: string
'''
session = get_session(req)
uid = getUid(req)
pinfo = session["personinfo"]
uinfo = collect_user_info(req)
if 'ln' in pinfo:
ln = pinfo["ln"]
else:
ln = CFG_SITE_LANG
_ = gettext_set_language(ln)
is_authorized = True
pids_to_check = []
if not AID_ENABLED:
return page_not_authorized(req, text=_("Fatal: Author ID capabilities are disabled on this system."))
if req_level and 'ulevel' in pinfo and pinfo["ulevel"] != req_level:
return page_not_authorized(req, text=_("Fatal: You are not allowed to access this functionality."))
if req_pid and not isinstance(req_pid, list):
pids_to_check = [req_pid]
elif req_pid and isinstance(req_pid, list):
pids_to_check = req_pid
if (not (uinfo['precached_usepaperclaim']
or uinfo['precached_usepaperattribution'])
and 'ulevel' in pinfo
and not pinfo["ulevel"] == "admin"):
is_authorized = False
if is_authorized and not webapi.user_can_view_CMP(uid):
is_authorized = False
if is_authorized and 'ticket' in pinfo:
for tic in pinfo["ticket"]:
if 'pid' in tic:
pids_to_check.append(tic['pid'])
if pids_to_check and is_authorized:
user_pid = webapi.get_pid_from_uid(uid)
if not uinfo['precached_usepaperattribution']:
if (not user_pid in pids_to_check
and 'ulevel' in pinfo
and not pinfo["ulevel"] == "admin"):
is_authorized = False
elif (user_pid in pids_to_check
and 'ulevel' in pinfo
and not pinfo["ulevel"] == "admin"):
for tic in list(pinfo["ticket"]):
if not tic["pid"] == user_pid:
pinfo['ticket'].remove(tic)
if not is_authorized:
return page_not_authorized(req, text=_("Fatal: You are not allowed to access this functionality."))
else:
return ""
def _generate_title(self, ulevel):
'''
Generates the title for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@return: title
@rtype: str
'''
def generate_title_guest():
title = 'Assign papers'
if self.person_id:
title = 'Assign papers for: ' + str(webapi.get_person_redirect_link(self.person_id))
return title
def generate_title_user():
title = 'Assign papers'
if self.person_id:
title = 'Assign papers (user interface) for: ' + str(webapi.get_person_redirect_link(self.person_id))
return title
def generate_title_admin():
title = 'Assign papers'
if self.person_id:
title = 'Assign papers (administrator interface) for: ' + str(
webapi.get_person_redirect_link(self.person_id))
return title
generate_title = {'guest': generate_title_guest,
'user': generate_title_user,
'admin': generate_title_admin}
return generate_title[ulevel]()
def _generate_tabs(self, ulevel, req):
'''
Generates the tabs content for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@param req: apache request object
@type req: apache request object
@return: tabs content
@rtype: str
'''
from invenio.bibauthorid_templates import verbiage_dict as tmpl_verbiage_dict
from invenio.bibauthorid_templates import buttons_verbiage_dict as tmpl_buttons_verbiage_dict
def generate_tabs_guest(req):
links = list() # ['delete', 'commit','del_entry','commit_entry']
tabs = ['records', 'repealed', 'review']
return generate_tabs_admin(req, show_tabs=tabs, ticket_links=links,
open_tickets=list(),
verbiage_dict=tmpl_verbiage_dict['guest'],
buttons_verbiage_dict=tmpl_buttons_verbiage_dict['guest'],
show_reset_button=False)
def generate_tabs_user(req):
links = ['delete', 'del_entry']
tabs = ['records', 'repealed', 'review', 'tickets']
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
user_is_owner = 'not_owner'
if pinfo["claimpaper_admin_last_viewed_pid"] == webapi.get_pid_from_uid(uid):
user_is_owner = 'owner'
open_tickets = webapi.get_person_request_ticket(self.person_id)
tickets = list()
for t in open_tickets:
owns = False
for row in t[0]:
if row[0] == 'uid-ip' and row[1].split('||')[0] == str(uid):
owns = True
if owns:
tickets.append(t)
return generate_tabs_admin(req, show_tabs=tabs, ticket_links=links,
open_tickets=tickets,
verbiage_dict=tmpl_verbiage_dict['user'][user_is_owner],
buttons_verbiage_dict=tmpl_buttons_verbiage_dict['user'][user_is_owner])
def generate_tabs_admin(req, show_tabs=['records', 'repealed', 'review', 'comments', 'tickets', 'data'],
ticket_links=['delete', 'commit', 'del_entry', 'commit_entry'], open_tickets=None,
verbiage_dict=None, buttons_verbiage_dict=None, show_reset_button=True):
session = get_session(req)
personinfo = dict()
try:
personinfo = session["personinfo"]
except KeyError:
return ""
if 'ln' in personinfo:
ln = personinfo["ln"]
else:
ln = CFG_SITE_LANG
all_papers = webapi.get_papers_by_person_id(self.person_id, ext_out=True)
records = [{'recid': paper[0],
'bibref': paper[1],
'flag': paper[2],
'authorname': paper[3],
'authoraffiliation': paper[4],
'paperdate': paper[5],
'rt_status': paper[6],
'paperexperiment': paper[7]} for paper in all_papers]
rejected_papers = [row for row in records if row['flag'] < -1]
rest_of_papers = [row for row in records if row['flag'] >= -1]
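# The 'review' tab is only shown when there are records needing review, and the
# 'tickets' tab is dropped when the caller passed an explicitly empty ticket list.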
review_needed = webapi.get_review_needing_records(self.person_id)
if len(review_needed) < 1:
if 'review' in show_tabs:
show_tabs.remove('review')
if open_tickets is None:
open_tickets = webapi.get_person_request_ticket(self.person_id)
else:
if len(open_tickets) < 1 and 'tickets' in show_tabs:
show_tabs.remove('tickets')
rt_tickets = None
if "admin_requested_ticket_id" in personinfo:
rt_tickets = personinfo["admin_requested_ticket_id"]
if verbiage_dict is None:
verbiage_dict = translate_dict_values(tmpl_verbiage_dict['admin'], ln)
if buttons_verbiage_dict is None:
buttons_verbiage_dict = translate_dict_values(tmpl_buttons_verbiage_dict['admin'], ln)
# send data to the template function
tabs = TEMPLATE.tmpl_admin_tabs(ln, person_id=self.person_id,
rejected_papers=rejected_papers,
rest_of_papers=rest_of_papers,
review_needed=review_needed,
rt_tickets=rt_tickets,
open_rt_tickets=open_tickets,
show_tabs=show_tabs,
ticket_links=ticket_links,
verbiage_dict=verbiage_dict,
buttons_verbiage_dict=buttons_verbiage_dict,
show_reset_button=show_reset_button)
return tabs
def translate_dict_values(dictionary, ln):
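# Recursively translate every string value of the (possibly nested) verbiage
# dictionary using the gettext catalogue for language ln.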
def translate_str_values(dictionary, f=lambda x: x):
translated_dict = dict()
for key, value in dictionary.iteritems():
if isinstance(value, str):
translated_dict[key] = f(value)
elif isinstance(value, dict):
translated_dict[key] = translate_str_values(value, f)
else:
raise TypeError("Value should be either string or dictionary.")
return translated_dict
return translate_str_values(dictionary, f=gettext_set_language(ln))
generate_tabs = {'guest': generate_tabs_guest,
'user': generate_tabs_user,
'admin': generate_tabs_admin}
return generate_tabs[ulevel](req)
def _generate_footer(self, ulevel):
'''
Generates the footer for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@return: footer
@rtype: str
'''
def generate_footer_guest():
return TEMPLATE.tmpl_invenio_search_box()
def generate_footer_user():
return generate_footer_guest()
def generate_footer_admin():
return generate_footer_guest()
generate_footer = {'guest': generate_footer_guest,
'user': generate_footer_user,
'admin': generate_footer_admin}
return generate_footer[ulevel]()
def _ticket_dispatch_end(self, req):
'''
The ticket dispatch has finished; redirect to the original referring page,
to the last viewed pid, or return to the autoclaimed papers box so it can populate its data
'''
session = get_session(req)
pinfo = session["personinfo"]
webapi.session_bareinit(req)
if 'claim_in_process' in pinfo:
pinfo['claim_in_process'] = False
if "merge_ticket" in pinfo and pinfo['merge_ticket']:
pinfo['merge_ticket'] = []
user_info = collect_user_info(req)
user_info['precached_viewclaimlink'] = True
session.dirty = True
if "referer" in pinfo and pinfo["referer"]:
referer = pinfo["referer"]
del(pinfo["referer"])
session.dirty = True
return redirect_to_url(req, referer)
# if we are coming from the autoclaim box we should not redirect and just return to the caller function
if 'autoclaim' in pinfo and pinfo['autoclaim']['review_failed'] == False and pinfo['autoclaim']['begin_autoclaim'] == True:
pinfo['autoclaim']['review_failed'] = False
pinfo['autoclaim']['begin_autoclaim'] = False
session.dirty = True
else:
redirect_page = webapi.history_get_last_visited_url(
pinfo['visit_diary'], limit_to_page=['manage_profile', 'claim'])
if not redirect_page:
redirect_page = webapi.get_fallback_redirect_link(req)
if 'autoclaim' in pinfo and pinfo['autoclaim']['review_failed'] and pinfo['autoclaim']['checkout']:
redirect_page = '%s/author/claim/action?checkout=True' % (CFG_SITE_URL,)
pinfo['autoclaim']['checkout'] = False
session.dirty = True
elif not 'manage_profile' in redirect_page:
pinfo['autoclaim']['review_failed'] = False
pinfo['autoclaim']['begin_autoclaim'] = False
pinfo['autoclaim']['checkout'] = True
session.dirty = True
redirect_page = '%s/author/claim/%s?open_claim=True' % (
CFG_SITE_URL,
webapi.get_person_redirect_link(pinfo["claimpaper_admin_last_viewed_pid"]))
else:
pinfo['autoclaim']['review_failed'] = False
pinfo['autoclaim']['begin_autoclaim'] = False
pinfo['autoclaim']['checkout'] = True
session.dirty = True
return redirect_to_url(req, redirect_page)
# redirect_link = diary('get_redirect_link', caller='_ticket_dispatch_end', parameters=[('open_claim','True')])
# return redirect_to_url(req, redirect_link)
def _check_user_fields(self, req, form):
argd = wash_urlargd(
form,
{'ln': (str, CFG_SITE_LANG),
'user_first_name': (str, None),
'user_last_name': (str, None),
'user_email': (str, None),
'user_comments': (str, None)})
session = get_session(req)
pinfo = session["personinfo"]
ulevel = pinfo["ulevel"]
skip_checkout_faulty_fields = False
if ulevel in ['user', 'admin']:
skip_checkout_faulty_fields = True
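# Logged-in users and admins are not blocked on incomplete contact fields at
# checkout; any problems detected below are collected in
# pinfo['checkout_faulty_fields'].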
if not ("user_first_name_sys" in pinfo and pinfo["user_first_name_sys"]):
if "user_first_name" in argd and argd['user_first_name']:
if not argd["user_first_name"] and not skip_checkout_faulty_fields:
pinfo["checkout_faulty_fields"].append("user_first_name")
else:
pinfo["user_first_name"] = escape(argd["user_first_name"])
if not ("user_last_name_sys" in pinfo and pinfo["user_last_name_sys"]):
if "user_last_name" in argd and argd['user_last_name']:
if not argd["user_last_name"] and not skip_checkout_faulty_fields:
pinfo["checkout_faulty_fields"].append("user_last_name")
else:
pinfo["user_last_name"] = escape(argd["user_last_name"])
if not ("user_email_sys" in pinfo and pinfo["user_email_sys"]):
if "user_email" in argd and argd['user_email']:
if not email_valid_p(argd["user_email"]):
pinfo["checkout_faulty_fields"].append("user_email")
else:
pinfo["user_email"] = escape(argd["user_email"])
if (ulevel == "guest"
and emailUnique(argd["user_email"]) > 0):
pinfo["checkout_faulty_fields"].append("user_email_taken")
else:
pinfo["checkout_faulty_fields"].append("user_email")
if "user_comments" in argd:
if argd["user_comments"]:
pinfo["user_ticket_comments"] = escape(argd["user_comments"])
else:
pinfo["user_ticket_comments"] = ""
session.dirty = True
def action(self, req, form):
'''
Initial step in processing of requests: ticket generation/update.
Also acts as action dispatcher for interface mass action requests.
Valid mass actions are:
- add_external_id: add an external identifier to an author
- add_missing_external_ids: add missing external identifiers of an author
- bibref_check_submit:
- cancel: clean the session (erase tickets and so on)
- cancel_rt_ticket:
- cancel_search_ticket:
- cancel_stage:
- checkout:
- checkout_continue_claiming:
- checkout_remove_transaction:
- checkout_submit:
- claim: claim papers for an author
- commit_rt_ticket:
- confirm: confirm assignments to an author
- delete_external_ids: delete external identifiers of an author
- repeal: repeal assignments from an author
- reset: reset assignments of an author
- set_canonical_name: set/swap the canonical name of an author
- to_other_person: assign a document from an author to another author
@param req: apache request object
@type req: apache request object
@param form: parameters sent via GET or POST request
@type form: dict
@return: a full page formatted in HTML
@rtype: str
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session["personinfo"]
argd = wash_urlargd(form,
{'autoclaim_show_review': (str, None),
'canonical_name': (str, None),
'existing_ext_ids': (list, None),
'ext_id': (str, None),
'uid': (int, None),
'ext_system': (str, None),
'ln': (str, CFG_SITE_LANG),
'pid': (int, -1),
'primary_profile': (str, None),
'search_param': (str, None),
'rt_action': (str, None),
'rt_id': (int, None),
'selection': (list, None),
'rtid': (int, None),
# permitted actions
'add_external_id': (str, None),
'set_uid': (str, None),
'add_missing_external_ids': (str, None),
'associate_profile': (str, None),
'bibref_check_submit': (str, None),
'cancel': (str, None),
'cancel_merging': (str, None),
'cancel_rt_ticket': (str, None),
'cancel_search_ticket': (str, None),
'cancel_stage': (str, None),
'checkout': (str, None),
'checkout_continue_claiming': (str, None),
'checkout_remove_transaction': (str, None),
'checkout_submit': (str, None),
'assign': (str, None),
'commit_rt_ticket': (str, None),
'close_rt_ticket': (str, None),
'confirm': (str, None),
'delete_external_ids': (str, None),
'email': (str, None),
'merge': (str, None),
'reject': (str, None),
'repeal': (str, None),
'reset': (str, None),
'send_message': (str, None),
'set_canonical_name': (str, None),
'to_other_person': (str, None)})
ulevel = pinfo["ulevel"]
ticket = pinfo["ticket"]
uid = getUid(req)
ln = argd['ln']
action = None
permitted_actions = ['add_external_id',
'set_uid',
'add_missing_external_ids',
'associate_profile',
'bibref_check_submit',
'cancel',
'cancel_merging',
'cancel_rt_ticket',
'cancel_search_ticket',
'cancel_stage',
'checkout',
'checkout_continue_claiming',
'checkout_remove_transaction',
'checkout_submit',
'assign',
'close_rt_ticket',
'commit_rt_ticket',
'confirm',
'delete_external_ids',
'merge',
'reject',
'repeal',
'reset',
'send_message',
'set_canonical_name',
'to_other_person']
for act in permitted_actions:
# at most one action is enabled in the form
if argd[act] is not None:
action = act
no_access = self._page_access_permission_wall(req, None)
if no_access and action not in ["assign"]:
return no_access
# papers with incomplete info or other problems trigger the action function without the user's interaction,
# so those problems can be fixed and the papers claimed or removed from the ticket
if (action is None
and "bibref_check_required" in pinfo
and pinfo["bibref_check_required"]):
if "bibref_check_reviewed_bibrefs" in pinfo:
del(pinfo["bibref_check_reviewed_bibrefs"])
session.dirty = True
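# Each permitted action is implemented as a small closure below; the dispatch
# table at the end of action() maps the washed form parameter to the matching
# closure, with none_action() as the fallback when no action was submitted.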
def add_external_id():
'''
associates the external id ext_id with the author profile identified by pid
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot add external id to unknown person")
if argd['ext_system']:
ext_sys = argd['ext_system']
else:
return self._error_page(req, ln,
"Fatal: cannot add an external id without specifying the system")
if argd['ext_id']:
ext_id = argd['ext_id']
else:
return self._error_page(req, ln,
"Fatal: cannot add a custom external id without a suggestion")
userinfo = "%s||%s" % (uid, req.remote_ip)
webapi.add_person_external_id(pid, ext_sys, ext_id, userinfo)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def set_uid():
'''
associates the web user identified by uid with the author profile pid and migrates the relevant arXiv papers
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: current user is unknown")
if argd['uid'] is not None:
dest_uid = int(argd['uid'])
else:
return self._error_page(req, ln,
"Fatal: user id is not valid")
userinfo = "%s||%s" % (uid, req.remote_ip)
webapi.set_person_uid(pid, dest_uid, userinfo)
# remove arxiv pubs of current pid
remove_arxiv_papers_of_author(pid)
dest_uid_pid = webapi.get_pid_from_uid(dest_uid)
if dest_uid_pid > -1:
# move the arxiv pubs of the dest_uid to the current pid
dest_uid_arxiv_papers = webapi.get_arxiv_papers_of_author(dest_uid_pid)
webapi.add_arxiv_papers_to_author(dest_uid_arxiv_papers, pid)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def add_missing_external_ids():
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot recompute external ids for an unknown person")
update_external_ids_of_authors([pid], overwrite=False)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def associate_profile():
'''
associates the currently logged-in user with the person profile identified by pid
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot associate profile without a person id.")
uid = getUid(req)
pid, profile_claimed = webapi.claim_profile(uid, pid)
redirect_pid = pid
if profile_claimed:
pinfo['pid'] = pid
pinfo['should_check_to_autoclaim'] = True
pinfo["login_info_message"] = "confirm_success"
session.dirty = True
redirect_to_url(req, '%s/author/manage_profile/%s' % (CFG_SITE_URL, redirect_pid))
# if someone has already claimed this profile, redirect to choose_profile with an error message
else:
param = ''
if 'search_param' in argd and argd['search_param']:
param = '&search_param=' + argd['search_param']
redirect_to_url(req, '%s/author/choose_profile?failed=%s%s' % (CFG_SITE_URL, True, param))
def bibref_check_submit():
pinfo["bibref_check_reviewed_bibrefs"] = list()
add_rev = pinfo["bibref_check_reviewed_bibrefs"].append
if ("bibrefs_auto_assigned" in pinfo
or "bibrefs_to_confirm" in pinfo):
person_reviews = list()
if ("bibrefs_auto_assigned" in pinfo
and pinfo["bibrefs_auto_assigned"]):
person_reviews.append(pinfo["bibrefs_auto_assigned"])
if ("bibrefs_to_confirm" in pinfo
and pinfo["bibrefs_to_confirm"]):
person_reviews.append(pinfo["bibrefs_to_confirm"])
for ref_review in person_reviews:
for person_id in ref_review:
for bibrec in ref_review[person_id]["bibrecs"]:
rec_grp = "bibrecgroup%s" % bibrec
elements = list()
if rec_grp in form:
if isinstance(form[rec_grp], str):
elements.append(form[rec_grp])
elif isinstance(form[rec_grp], list):
elements += form[rec_grp]
else:
continue
for element in elements:
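# Each form element encodes "<pid>||<bibref>"; the bibref part is combined
# with the record id into a full bibrefrec before being stored as reviewed.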
test = element.split("||")
if test and len(test) > 1 and test[1]:
tref = test[1] + "," + str(bibrec)
tpid = webapi.wash_integer_id(test[0])
if (webapi.is_valid_bibref(tref)
and tpid > -1):
add_rev(element + "," + str(bibrec))
session.dirty = True
def cancel():
self.__session_cleanup(req)
return self._ticket_dispatch_end(req)
def cancel_merging():
'''
clears any merge content from the session and redirects to the manage profile page
that the user was viewing before the merge
'''
if argd['primary_profile']:
primary_cname = argd['primary_profile']
else:
return self._error_page(req, ln,
"Fatal: Couldn't redirect to the previous page")
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
if pinfo['merge_profiles']:
pinfo['merge_profiles'] = list()
session.dirty = True
redirect_url = "%s/author/manage_profile/%s" % (CFG_SITE_URL, primary_cname)
return redirect_to_url(req, redirect_url)
def cancel_rt_ticket():
if argd['selection'] is not None:
bibrefrecs = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot cancel unknown ticket")
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln, "Fatal: cannot cancel unknown ticket")
if argd['rt_id'] is not None and argd['rt_action'] is not None:
rt_id = int(argd['rt_id'])
rt_action = argd['rt_action']
for bibrefrec in bibrefrecs:
webapi.delete_transaction_from_request_ticket(pid, rt_id, rt_action, bibrefrec)
else:
rt_id = int(bibrefrecs[0])
webapi.delete_request_ticket(pid, rt_id)
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, pid))
def cancel_search_ticket(without_return=False):
if 'search_ticket' in pinfo:
del(pinfo['search_ticket'])
session.dirty = True
if "claimpaper_admin_last_viewed_pid" in pinfo:
pid = pinfo["claimpaper_admin_last_viewed_pid"]
if not without_return:
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
if not without_return:
return self.search(req, form)
def cancel_stage():
if 'bibref_check_required' in pinfo:
del(pinfo['bibref_check_required'])
if 'bibrefs_auto_assigned' in pinfo:
del(pinfo['bibrefs_auto_assigned'])
if 'bibrefs_to_confirm' in pinfo:
del(pinfo['bibrefs_to_confirm'])
for tt in [row for row in ticket if 'incomplete' in row]:
ticket.remove(tt)
session.dirty = True
return self._ticket_dispatch_end(req)
def checkout():
pass
# return self._ticket_final_review(req)
def checkout_continue_claiming():
pinfo["checkout_faulty_fields"] = list()
self._check_user_fields(req, form)
return self._ticket_dispatch_end(req)
def checkout_remove_transaction():
bibref = argd['checkout_remove_transaction']
if webapi.is_valid_bibref(bibref):
for rmt in [row for row in ticket if row["bibref"] == bibref]:
ticket.remove(rmt)
pinfo["checkout_confirmed"] = False
session.dirty = True
# return self._ticket_final_review(req)
def checkout_submit():
pinfo["checkout_faulty_fields"] = list()
self._check_user_fields(req, form)
if not ticket:
pinfo["checkout_faulty_fields"].append("tickets")
pinfo["checkout_confirmed"] = True
if pinfo["checkout_faulty_fields"]:
pinfo["checkout_confirmed"] = False
session.dirty = True
# return self._ticket_final_review(req)
def claim():
if argd['selection'] is not None:
bibrefrecs = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot create ticket without any papers selected. " + \
"Please go back and select which papers would you like to claim.")
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot claim papers to an unknown person")
if action == 'assign':
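# Drop records that are already claimed for this author so that only new
# assignments end up in the ticket.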
claimed_recs = [paper[2] for paper in get_claimed_papers_of_author(pid)]
for bibrefrec in list(bibrefrecs):
_, rec = webapi.split_bibrefrec(bibrefrec)
if rec in claimed_recs:
bibrefrecs.remove(bibrefrec)
for bibrefrec in bibrefrecs:
operation_parts = {'pid': pid,
'action': action,
'bibrefrec': bibrefrec}
operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
if operation_to_be_added is None:
continue
ticket = pinfo['ticket']
webapi.add_operation_to_ticket(operation_to_be_added, ticket)
session.dirty = True
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def claim_to_other_person():
if argd['selection'] is not None:
bibrefrecs = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot create ticket without any papers selected. " + \
"Please go back and select which papers would you like to claim.")
return self._ticket_open_assign_to_other_person(req, bibrefrecs, form)
def commit_rt_ticket():
if argd['selection'] is not None:
tid = argd['selection'][0]
else:
return self._error_page(req, ln,
"Fatal: cannot cancel unknown ticket")
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot cancel unknown ticket")
return self._commit_rt_ticket(req, tid, pid)
def confirm_repeal_reset():
if argd['pid'] > -1 or int(argd['pid']) == CREATE_NEW_PERSON:
pid = argd['pid']
cancel_search_ticket(without_return=True)
else:
return self._ticket_open_assign_to_other_person(req, argd['selection'], form)
# return self._error_page(req, ln, "Fatal: cannot create ticket without a
# person id! (crr %s)" %repr(argd))
bibrefrecs = argd['selection']
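# Map the submitted button to the internal operation name: 'confirm' becomes
# 'assign', 'repeal' becomes 'reject' and 'reset' stays 'reset'. A pid equal to
# CREATE_NEW_PERSON first creates a new author; every bibrefrec is then queued
# through the author ticket handling AJAX interface.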
if argd['confirm']:
action = 'assign'
if pid == CREATE_NEW_PERSON:
pid = create_new_person(getUid(req))
elif argd['repeal']:
action = 'reject'
elif argd['reset']:
action = 'reset'
else:
return self._error_page(req, ln, "Fatal: not existent action!")
for bibrefrec in bibrefrecs:
form['jsondata'] = json.dumps({'pid': str(pid),
'action': action,
'bibrefrec': bibrefrec,
'on': 'user'})
t = WebInterfaceAuthorTicketHandling()
t.add_operation(req, form)
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def close_rt_ticket():
BIBCATALOG_SYSTEM.ticket_set_attribute(0, argd['rtid'], 'status', 'resolved')
remove_rtid_from_ticket(argd['rtid'], argd['pid'])
return redirect_to_url(req, "%s/author/claim/%s#tabTickets" % (CFG_SITE_URL, webapi.get_person_redirect_link(argd['pid'])))
def delete_external_ids():
'''
deletes the association between the author profile identified by pid and the selected external ids
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot delete external ids from an unknown person")
if argd['existing_ext_ids'] is not None:
existing_ext_ids = argd['existing_ext_ids']
else:
return self._error_page(req, ln,
"Fatal: you must select at least one external id in order to delete it")
userinfo = "%s||%s" % (uid, req.remote_ip)
webapi.delete_person_external_ids(pid, existing_ext_ids, userinfo)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def none_action():
return self._error_page(req, ln,
"Fatal: cannot create ticket if no action selected.")
def merge():
'''
performs a merge if allowed on the profiles that the user chose
'''
if argd['primary_profile']:
primary_cname = argd['primary_profile']
else:
return self._error_page(req, ln,
"Fatal: cannot perform a merge without a primary profile!")
if argd['selection']:
profiles_to_merge = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot perform a merge without any profiles selected!")
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
primary_pid = webapi.get_person_id_from_canonical_id(primary_cname)
pids_to_merge = [webapi.get_person_id_from_canonical_id(cname) for cname in profiles_to_merge]
is_admin = False
if pinfo['ulevel'] == 'admin':
is_admin = True
# checking if there are restrictions regarding this merge
can_perform_merge, preventing_pid, error_message = webapi.merge_is_allowed(primary_pid, pids_to_merge, is_admin)
if not can_perform_merge:
# when redirected back to the merge profiles page display an error message
# about the currently attempted merge
session.dirty = True
req.status = apache.HTTP_CONFLICT
c_name = webapi.get_canonical_id_from_person_id(preventing_pid)
return 'Cannot merge profile: %s Reason: %s' % (c_name,
error_message)
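# Admins merge immediately; non-admin users only file a merge request that is
# reviewed by an admin (see create_request_message below).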
if is_admin:
webapi.merge_profiles(primary_pid, pids_to_merge)
else:
name = ''
if 'user_last_name' in pinfo:
name = pinfo['user_last_name']
if 'user_first_name' in pinfo:
name += pinfo['user_first_name']
email = ''
if 'user_email' in pinfo:
email = pinfo['user_email']
elif 'email' in argd:
# the email was submitted in form
email = argd['email']
pinfo['form_email'] = email
selection_str = "&selection=".join(profiles_to_merge)
userinfo = {'uid-ip': "userid: %s (from %s)" % (uid, req.remote_ip),
'name': name,
'email': email,
'merge link': "%s/author/merge_profiles?primary_profile=%s&selection=%s" % (CFG_SITE_URL, primary_cname, selection_str),
'uid': uid}
# a message is sent to the admin with info regarding the currently attempted merge
webapi.create_request_message(userinfo, subj=('Merge profiles request: %s' % primary_cname))
# when redirected back to the manage profile page display a message about the merge
pinfo['merge_info_message'] = ("success", "confirm_operation")
pinfo['merge_profiles'] = list()
session.dirty = True
redirect_url = "%s/author/manage_profile/%s" % (CFG_SITE_URL, primary_cname)
return redirect_to_url(req, redirect_url)
def send_message():
'''
sends a message from the user to the admin
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
# pp = pprint.PrettyPrinter(indent=4)
# session_dump = pp.pprint(pinfo)
session_dump = str(pinfo)
name = ''
name_changed = False
name_given = ''
email = ''
email_changed = False
email_given = ''
comment = ''
last_page_visited = ''
if "user_last_name" in pinfo:
name = pinfo["user_last_name"]
if "user_first_name" in pinfo:
name += pinfo["user_first_name"]
name = name.rstrip()
if "user_email" in pinfo:
email = pinfo["user_email"]
email = email.rstrip()
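# Name and e-mail typed into the form are compared with what the session
# already holds; mismatches are reported to the admin as *_given / *_changed
# fields instead of overwriting the session values.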
if 'Name' in form:
if not name:
name = form['Name']
elif name != form['Name']:
name_given = form['Name']
name_changed = True
name = name.rstrip()
if 'E-mail' in form:
if not email:
email = form['E-mail']
elif email != form['E-mail']:
email_given = form['E-mail']
email_changed = True
email = email.rstrip()
if 'Comment' in form:
comment = form['Comment']
comment = comment.rstrip()
if not name or not comment or not email:
redirect_to_url(req, '%s/author/help?incomplete_params=%s' % (CFG_SITE_URL, True))
if 'last_page_visited' in form:
last_page_visited = form['last_page_visited']
uid = getUid(req)
userinfo = {'uid-ip': "userid: %s (from %s)" % (uid, req.remote_ip),
'name': name,
'email': email,
'comment': comment,
'last_page_visited': last_page_visited,
'session_dump': session_dump,
'name_given': name_given,
'email_given': email_given,
'name_changed': name_changed,
'email_changed': email_changed,
'uid': uid}
webapi.create_request_message(userinfo)
def set_canonical_name():
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot set canonical name to unknown person")
if argd['canonical_name'] is not None:
cname = argd['canonical_name']
else:
return self._error_page(req, ln,
"Fatal: cannot set a custom canonical name without a suggestion")
userinfo = "%s||%s" % (uid, req.remote_ip)
if webapi.is_valid_canonical_id(cname):
webapi.swap_person_canonical_name(pid, cname, userinfo)
else:
webapi.update_person_canonical_name(pid, cname, userinfo)
return redirect_to_url(req, "%s/author/claim/%s%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid), '#tabData'))
action_functions = {'add_external_id': add_external_id,
'set_uid': set_uid,
'add_missing_external_ids': add_missing_external_ids,
'associate_profile': associate_profile,
'bibref_check_submit': bibref_check_submit,
'cancel': cancel,
'cancel_merging': cancel_merging,
'cancel_rt_ticket': cancel_rt_ticket,
'cancel_search_ticket': cancel_search_ticket,
'cancel_stage': cancel_stage,
'checkout': checkout,
'checkout_continue_claiming': checkout_continue_claiming,
'checkout_remove_transaction': checkout_remove_transaction,
'checkout_submit': checkout_submit,
'assign': claim,
'commit_rt_ticket': commit_rt_ticket,
'close_rt_ticket': close_rt_ticket,
'confirm': confirm_repeal_reset,
'delete_external_ids': delete_external_ids,
'merge': merge,
'reject': claim,
'repeal': confirm_repeal_reset,
'reset': confirm_repeal_reset,
'send_message': send_message,
'set_canonical_name': set_canonical_name,
'to_other_person': claim_to_other_person,
None: none_action}
return action_functions[action]()
def _ticket_open_assign_to_other_person(self, req, bibrefs, form):
'''
Initializes search to find a person to attach the selected records to
@param req: Apache request object
@type req: Apache request object
@param bibrefs: list of record IDs to consider
@type bibrefs: list of int
@param form: GET/POST request parameters
@type form: dict
'''
session = get_session(req)
pinfo = session["personinfo"]
pinfo["search_ticket"] = dict()
search_ticket = pinfo["search_ticket"]
search_ticket['action'] = 'assign'
search_ticket['bibrefs'] = bibrefs
session.dirty = True
return self.search(req, form)
def _cancel_rt_ticket(self, req, tid, pid):
'''
deletes an RT ticket
'''
webapi.delete_request_ticket(pid, tid)
return redirect_to_url(req, "%s/author/claim/%s" %
(CFG_SITE_URL, webapi.get_person_redirect_link(str(pid))))
def _cancel_transaction_from_rt_ticket(self, tid, pid, action, bibref):
'''
deletes a transaction from an rt ticket
'''
webapi.delete_transaction_from_request_ticket(pid, tid, action, bibref)
def _commit_rt_ticket(self, req, tid, pid):
'''
Commit of an rt ticket: creates a real ticket and commits.
'''
session = get_session(req)
pinfo = session["personinfo"]
ticket = pinfo["ticket"]
uid = getUid(req)
tid = int(tid)
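# Look up the stored request ticket; each of its operations is turned into a
# regular claim-ticket operation for the current user before the request
# ticket itself is deleted.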
try:
rt_ticket = get_validated_request_tickets_for_author(pid, tid)[0]
except IndexError:
msg = """This ticket with the tid: %s has already been
removed.""" % tid
return self._error_page(req, message=msg)
for action, bibrefrec in rt_ticket['operations']:
operation_parts = {'pid': pid,
'action': action,
'bibrefrec': bibrefrec}
operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
webapi.add_operation_to_ticket(operation_to_be_added, ticket)
session.dirty = True
webapi.delete_request_ticket(pid, tid)
redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, pid))
def _error_page(self, req, ln=CFG_SITE_LANG, message=None, intro=True):
'''
Create a page that contains a message explaining the error.
@param req: Apache Request Object
@type req: Apache Request Object
@param ln: language
@type ln: string
@param message: message to be displayed
@type message: string
'''
body = []
_ = gettext_set_language(ln)
if not message:
message = "No further explanation available. Sorry."
if intro:
body.append(_("<p>We're sorry. An error occurred while "
"handling your request. Please find more information "
"below:</p>"))
body.append("<p><strong>%s</strong></p>" % message)
return page(title=_("Notice"),
body="\n".join(body),
description="%s - Internal Error" % BIBAUTHORID_CFG_SITE_NAME,
keywords="%s, Internal Error" % BIBAUTHORID_CFG_SITE_NAME,
language=ln,
req=req)
def __session_cleanup(self, req):
'''
Cleans the session from all bibauthorid specific settings and
with that cancels any transaction currently in progress.
@param req: Apache Request Object
@type req: Apache Request Object
'''
session = get_session(req)
try:
pinfo = session["personinfo"]
except KeyError:
return
if "ticket" in pinfo:
pinfo['ticket'] = []
if "search_ticket" in pinfo:
pinfo['search_ticket'] = dict()
# clear up bibref checker if it's done.
if ("bibref_check_required" in pinfo
and not pinfo["bibref_check_required"]):
if 'bibrefs_to_confirm' in pinfo:
del(pinfo['bibrefs_to_confirm'])
if "bibrefs_auto_assigned" in pinfo:
del(pinfo["bibrefs_auto_assigned"])
del(pinfo["bibref_check_required"])
if "checkout_confirmed" in pinfo:
del(pinfo["checkout_confirmed"])
if "checkout_faulty_fields" in pinfo:
del(pinfo["checkout_faulty_fields"])
# pinfo['ulevel'] = ulevel
# pinfo["claimpaper_admin_last_viewed_pid"] = -1
pinfo["admin_requested_ticket_id"] = -1
session.dirty = True
def _generate_search_ticket_box(self, req):
'''
Generate the search ticket to remember a pending search for Person
entities in an attribution process
@param req: Apache request object
@type req: Apache request object
'''
session = get_session(req)
pinfo = session["personinfo"]
search_ticket = None
if 'search_ticket' in pinfo:
search_ticket = pinfo['search_ticket']
if not search_ticket:
return ''
else:
return TEMPLATE.tmpl_search_ticket_box('person_search', 'assign_papers', search_ticket['bibrefs'])
def search_box(self, query, shown_element_functions):
'''
Collects the data of the persons returned by the search and renders the result list.
@param query: the query string
@type query: string
@param shown_element_functions: functions that tell the template which columns to show and which buttons to print
@type shown_element_functions: dict
@return: html body
@rtype: string
'''
pid_list = self._perform_search(query)
search_results = []
for pid in pid_list:
result = defaultdict(list)
result['pid'] = pid
result['canonical_id'] = webapi.get_canonical_id_from_person_id(pid)
result['name_variants'] = webapi.get_person_names_from_id(pid)
result['external_ids'] = webapi.get_external_ids_from_person_id(pid)
# this variable shows if we want to use the following data in the search template
if 'pass_status' in shown_element_functions and shown_element_functions['pass_status']:
result['status'] = webapi.is_profile_available(pid)
search_results.append(result)
body = TEMPLATE.tmpl_author_search(query, search_results, shown_element_functions)
body = TEMPLATE.tmpl_person_detail_layout(body)
return body
def search(self, req, form):
'''
Searches for a person based on the name with which the function is queried.
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request parameters
@type form: dict
@return: a full page formatted in HTML
@rtype: string
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ulevel = pinfo['ulevel']
person_id = self.person_id
uid = getUid(req)
argd = wash_urlargd(
form,
{'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0),
'q': (str, None)})
debug = "verbose" in argd and argd["verbose"] > 0
ln = argd['ln']
cname = ''
is_owner = False
last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])
if last_visited_pid is not None:
cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
try:
int(cname)
except ValueError:
is_owner = False
else:
is_owner = self._is_profile_owner(last_visited_pid)
menu = WebProfileMenu(str(cname), "search", ln, is_owner, self._is_admin(pinfo))
title = "Person search"
# Create Wrapper Page Markup
profile_page = WebProfilePage("search", title, no_cache=True)
profile_page.add_bootstrapped_data(json.dumps({
"backbone": """
(function(ticketbox) {
var app = ticketbox.app;
app.userops.set(%s);
app.bodyModel.set({userLevel: "%s"});
})(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel)
}))
if debug:
profile_page.add_debug_info(pinfo)
no_access = self._page_access_permission_wall(req)
shown_element_functions = dict()
shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_general_search_bar()
if no_access:
return no_access
search_ticket = None
bibrefs = []
if 'search_ticket' in pinfo:
search_ticket = pinfo['search_ticket']
for r in search_ticket['bibrefs']:
bibrefs.append(r)
if search_ticket and "ulevel" in pinfo:
if pinfo["ulevel"] == "admin":
shown_element_functions['new_person_gen'] = TEMPLATE.tmpl_assigning_search_new_person_generator(bibrefs)
content = ""
if search_ticket:
shown_element_functions['button_gen'] = TEMPLATE.tmpl_assigning_search_button_generator(bibrefs)
content = content + self._generate_search_ticket_box(req)
query = None
if 'q' in argd:
if argd['q']:
query = escape(argd['q'])
content += self.search_box(query, shown_element_functions)
body = profile_page.get_wrapped_body("generic", {'html': content})
parameter = None
if query:
parameter = '?search_param=%s' % query
webapi.history_log_visit(req, 'search', params=parameter)
return page(title=title,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def merge_profiles(self, req, form):
'''
Beginning of the process that performs the merge over multiple person profiles.
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request parameters
@type form: dict
@return: a full page formatted in HTML
@rtype: string
'''
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'primary_profile': (str, None),
'search_param': (str, ''),
'selection': (list, None),
'verbose': (int, 0)})
ln = argd['ln']
primary_cname = argd['primary_profile']
search_param = argd['search_param']
selection = argd['selection']
debug = 'verbose' in argd and argd['verbose'] > 0
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
profiles_to_merge = pinfo['merge_profiles']
_ = gettext_set_language(ln)
if not primary_cname:
return page_not_authorized(req, text=_('This page is not accessible directly.'))
no_access = self._page_access_permission_wall(req)
if no_access:
return no_access
if selection is not None:
profiles_to_merge_session = [cname for cname, is_available in profiles_to_merge]
for profile in selection:
if profile not in profiles_to_merge_session:
pid = webapi.get_person_id_from_canonical_id(profile)
is_available = webapi.is_profile_available(pid)
pinfo['merge_profiles'].append([profile, '1' if is_available else '0'])
session.dirty = True
primary_pid = webapi.get_person_id_from_canonical_id(primary_cname)
is_available = webapi.is_profile_available(primary_pid)
if not session['personinfo']['merge_primary_profile']:
session['personinfo']['merge_primary_profile'] = [primary_cname, '1' if is_available else '0']
session.dirty = True
body = ''
cname = ''
is_owner = False
last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])
if last_visited_pid is not None:
cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
is_owner = self._is_profile_owner(last_visited_pid)
title = 'Merge Profiles'
menu = WebProfileMenu(str(cname), "manage_profile", ln, is_owner, self._is_admin(pinfo))
merge_page = WebProfilePage("merge_profile", title, no_cache=True)
merge_page.add_profile_menu(menu)
if debug:
merge_page.add_debug_info(pinfo)
# display status for any previously attempted merge
if pinfo['merge_info_message']:
teaser_key, message = pinfo['merge_info_message']
body += TEMPLATE.tmpl_merge_transaction_box(teaser_key, [message])
pinfo['merge_info_message'] = None
session.dirty = True
body += TEMPLATE.tmpl_merge_ticket_box('person_search', 'merge_profiles', primary_cname)
shown_element_functions = dict()
shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_merge_profiles_search_bar(primary_cname)
shown_element_functions['button_gen'] = TEMPLATE.merge_profiles_button_generator()
shown_element_functions['pass_status'] = 'True'
gFormEmail = ""
if 'form_email' in pinfo:
gFormEmail = pinfo['form_email']
merge_page.add_bootstrapped_data(json.dumps({
"other": ("var gMergeProfile = %s; var gMergeList = %s;" +
"var gUserLevel = '%s'; var gFormEmail = '%s';") %
([primary_cname, '1' if is_available else '0'],
profiles_to_merge, pinfo['ulevel'], gFormEmail)
}))
body += self.search_box(search_param, shown_element_functions)
body = merge_page.get_wrapped_body("generic", {'html': body})
return page(title=title,
metaheaderadd=merge_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def _perform_search(self, search_param):
'''
Calls the person-name search on search_param and returns the matching pids.
@param search_param: query string
@type search_param: string
@return: list of pids matching the search query
@rtype: list
'''
pid_candidates_list = []
nquery = None
if search_param:
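# For "field:value" style queries only the value part (falling back to the
# field part) is used for the name search; plain queries are used unchanged.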
if search_param.count(":"):
try:
left, right = search_param.split(":")
try:
nsearch_param = str(right)
except (ValueError, TypeError):
try:
nsearch_param = str(left)
except (ValueError, TypeError):
nsearch_param = search_param
except ValueError:
nsearch_param = search_param
else:
nsearch_param = search_param
sorted_results = webapi.search_person_ids_by_name(nsearch_param)
for result in sorted_results:
pid_candidates_list.append(result[0])
return pid_candidates_list
def merge_profiles_ajax(self, req, form):
'''
Handles Ajax requests for adding/removing profiles to/from the merge list,
which is saved in the session.
@param req: Apache Request Object
@type req: Apache Request Object
@param form: Parameters sent via Ajax request
@type form: dict
@return: json data
'''
# Abort if the simplejson module isn't available
if not CFG_JSON_AVAILABLE:
print "Json not configurable"
# If it is an Ajax request, extract any JSON data.
ajax_request = False
# Recent papers request
if 'jsondata' in form:
json_data = json.loads(str(form['jsondata']))
# Deunicode all strings (Invenio doesn't have unicode
# support).
json_data = json_unicode_to_utf8(json_data)
ajax_request = True
json_response = {'resultCode': 0}
# Handle request.
if ajax_request:
req_type = json_data['requestType']
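# Three request types are handled: 'addProfile' and 'removeProfile' edit the
# merge list kept in the session, while 'setPrimaryProfile' swaps the primary
# profile with an entry of that list. Errors are reported through the 'result'
# field and success through resultCode 1.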
if req_type == 'addProfile':
if 'profile' in json_data:
profile = json_data['profile']
person_id = webapi.get_person_id_from_canonical_id(profile)
if person_id != -1:
webapi.session_bareinit(req)
session = get_session(req)
profiles_to_merge = session["personinfo"]["merge_profiles"]
profile_availability = webapi.is_profile_available(person_id)
if profile_availability:
profile_availability = "1"
else:
profile_availability = "0"
if profile not in [el[0] for el in profiles_to_merge]:
profiles_to_merge.append([profile, profile_availability])
session.dirty = True
# TODO check access rights and get profile from db
json_response.update({'resultCode': 1})
json_response.update({'addedPofile': profile})
json_response.update({'addedPofileAvailability': profile_availability})
else:
json_response.update({'result': 'Error: Profile does not exist'})
else:
json_response.update({'result': 'Error: Profile was already in the list'})
else:
json_response.update({'result': 'Error: Missing profile'})
elif req_type == 'removeProfile':
if 'profile' in json_data:
profile = json_data['profile']
if webapi.get_person_id_from_canonical_id(profile) != -1:
webapi.session_bareinit(req)
session = get_session(req)
profiles_to_merge = session["personinfo"]["merge_profiles"]
# print (str(profiles_to_merge))
if profile in [el[0] for el in profiles_to_merge]:
for prof in list(profiles_to_merge):
if prof[0] == profile:
profiles_to_merge.remove(prof)
session.dirty = True
# TODO check access rights and get profile from db
json_response.update({'resultCode': 1})
json_response.update({'removedProfile': profile})
else:
json_response.update({'result': 'Error: Profile was missing already from the list'})
else:
json_response.update({'result': 'Error: Profile does not exist'})
else:
json_response.update({'result': 'Error: Missing profile'})
elif req_type == 'setPrimaryProfile':
if 'profile' in json_data:
profile = json_data['profile']
profile_id = webapi.get_person_id_from_canonical_id(profile)
if profile_id != -1:
webapi.session_bareinit(req)
session = get_session(req)
profile_availability = webapi.is_profile_available(profile_id)
if profile_availability:
profile_availability = "1"
else:
profile_availability = "0"
profiles_to_merge = session["personinfo"]["merge_profiles"]
if profile in [el[0] for el in profiles_to_merge if el and el[0]]:
for prof in list(profiles_to_merge):
if prof[0] == profile:
profiles_to_merge.remove(prof)
primary_profile = session["personinfo"]["merge_primary_profile"]
if primary_profile and primary_profile not in profiles_to_merge:
profiles_to_merge.append(primary_profile)
session["personinfo"]["merge_primary_profile"] = [profile, profile_availability]
session.dirty = True
json_response.update({'resultCode': 1})
json_response.update({'primaryProfile': profile})
json_response.update({'primaryPofileAvailability': profile_availability})
else:
json_response.update({'result': 'Error: Profile was already in the list'})
else:
json_response.update({'result': 'Error: Missing profile'})
else:
json_response.update({'result': 'Error: Wrong request type'})
return json.dumps(json_response)
def search_box_ajax(self, req, form):
'''
Function used for handling Ajax requests used in the search box.
@param req: Apache Request Object
@type req: Apache Request Object
@param form: Parameters sent via Ajax request
@type form: dict
@return: json data
'''
# Abort if the simplejson module isn't available
if not CFG_JSON_AVAILABLE:
print "Json not configurable"
# If it is an Ajax request, extract any JSON data.
ajax_request = False
# Recent papers request
if 'jsondata' in form:
json_data = json.loads(str(form['jsondata']))
# Deunicode all strings (Invenio doesn't have unicode
# support).
json_data = json_unicode_to_utf8(json_data)
ajax_request = True
json_response = {'resultCode': 0}
# Handle request.
if ajax_request:
req_type = json_data['requestType']
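# Supported request types: 'getPapers' (at most MAX_NUM_SHOW_PAPERS rendered
# rows plus the total count), 'getNames', 'getIDs' and 'isProfileClaimed'.
# Each successful branch returns resultCode 1 and echoes the pid back.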
if req_type == 'getPapers':
if 'personId' in json_data:
pId = json_data['personId']
papers = sorted([[p[0]] for p in webapi.get_papers_by_person_id(int(pId), -1)],
key=itemgetter(0))
papers_html = TEMPLATE.tmpl_gen_papers(papers[0:MAX_NUM_SHOW_PAPERS])
json_response.update({'result': "\n".join(papers_html)})
json_response.update({'totalPapers': len(papers)})
json_response.update({'resultCode': 1})
json_response.update({'pid': str(pId)})
else:
json_response.update({'result': 'Error: Missing person id'})
elif req_type == 'getNames':
if 'personId' in json_data:
pId = json_data['personId']
names = webapi.get_person_names_from_id(int(pId))
names_html = TEMPLATE.tmpl_gen_names(names)
json_response.update({'result': "\n".join(names_html)})
json_response.update({'resultCode': 1})
json_response.update({'pid': str(pId)})
elif req_type == 'getIDs':
if 'personId' in json_data:
pId = json_data['personId']
ids = webapi.get_external_ids_from_person_id(int(pId))
ids_html = TEMPLATE.tmpl_gen_ext_ids(ids)
json_response.update({'result': "\n".join(ids_html)})
json_response.update({'resultCode': 1})
json_response.update({'pid': str(pId)})
elif req_type == 'isProfileClaimed':
if 'personId' in json_data:
pId = json_data['personId']
isClaimed = webapi.get_uid_from_personid(pId)
if isClaimed != -1:
json_response.update({'resultCode': 1})
json_response.update({'pid': str(pId)})
else:
json_response.update({'result': 'Error: Wrong request type'})
return json.dumps(json_response)
def choose_profile(self, req, form):
'''
Generate SSO landing/choose_profile page
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'search_param': (str, None),
'failed': (str, None),
'verbose': (int, 0)})
ln = argd['ln']
debug = "verbose" in argd and argd["verbose"] > 0
req.argd = argd # needed for perform_req_search
search_param = argd['search_param']
webapi.session_bareinit(req)
session = get_session(req)
uid = getUid(req)
pinfo = session['personinfo']
failed = True
if not argd['failed']:
failed = False
_ = gettext_set_language(ln)
if not CFG_INSPIRE_SITE:
return page_not_authorized(req, text=_("This page is not accessible directly."))
params = WebInterfaceBibAuthorIDClaimPages.get_params_to_check_login_info(session)
login_info = webapi.get_login_info(uid, params)
if 'arXiv' not in login_info['logged_in_to_remote_systems']:
return page_not_authorized(req, text=_("This page is not accessible directly."))
pid = webapi.get_user_pid(login_info['uid'])
# Create Wrapper Page Markup
is_owner = False
menu = WebProfileMenu('', "choose_profile", ln, is_owner, self._is_admin(pinfo))
choose_page = WebProfilePage("choose_profile", "Choose your profile", no_cache=True)
choose_page.add_profile_menu(menu)
if debug:
choose_page.add_debug_info(pinfo)
content = TEMPLATE.tmpl_choose_profile(failed)
body = choose_page.get_wrapped_body("generic", {'html': content})
# In any case, whenever we pass through here, an autoclaim should be performed right afterwards!
pinfo = session["personinfo"]
pinfo['should_check_to_autoclaim'] = True
session.dirty = True
last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])
# if already logged in then redirect the user to the page he was viewing
if pid != -1:
redirect_pid = pid
if last_visited_pid:
redirect_pid = last_visited_pid
redirect_to_url(req, '%s/author/manage_profile/%s' % (CFG_SITE_URL, str(redirect_pid)))
else:
# get name strings and email addresses from SSO/Oauth logins:
# {'system':{'name':[variant1,...,variantn], 'email':'blabla@bla.bla',
# 'pants_size':20}}
remote_login_systems_info = webapi.get_remote_login_systems_info(
req, login_info['logged_in_to_remote_systems'])
# get union of recids that are associated to the ids from all the external systems: set(inspire_recids_list)
recids = webapi.get_remote_login_systems_recids(req, login_info['logged_in_to_remote_systems'])
# this is the profile with the biggest intersection of papers so it's
# more probable that this is the profile the user seeks
probable_pid = webapi.match_profile(req, recids, remote_login_systems_info)
# if not search_param and probable_pid > -1 and probable_pid == last_visited_pid:
# try to assign the user to the profile he chose. If for some reason the profile is not available we assign him to an empty profile
# redirect_pid, profile_claimed = webapi.claim_profile(login_info['uid'], probable_pid)
# if profile_claimed:
# redirect_to_url(req,
# '%s/author/claim/action?associate_profile=True&redirect_pid=%s' %
# (CFG_SITE_URL, str(redirect_pid)))
probable_profile_suggestion_info = None
last_viewed_profile_suggestion_info = None
if last_visited_pid > -1 and webapi.is_profile_available(last_visited_pid):
# get information about the last viewed profile and show it to the user
last_viewed_profile_suggestion_info = webapi.get_profile_suggestion_info(req, last_visited_pid, recids)
if probable_pid > -1 and webapi.is_profile_available(probable_pid):
# get information about the most probable profile and show it to the user
probable_profile_suggestion_info = webapi.get_profile_suggestion_info(req, probable_pid, recids)
if not search_param:
# prefill the search with the most relevant of the names we get from external systems
name_variants = webapi.get_name_variants_list_from_remote_systems_names(remote_login_systems_info)
search_param = most_relevant_name(name_variants)
body = body + TEMPLATE.tmpl_probable_profile_suggestion(
probable_profile_suggestion_info,
last_viewed_profile_suggestion_info,
search_param)
free_id = get_free_author_id()
shown_element_functions = dict()
shown_element_functions['button_gen'] = TEMPLATE.tmpl_choose_profile_search_button_generator()
shown_element_functions['new_person_gen'] = TEMPLATE.tmpl_choose_profile_search_new_person_generator(free_id)
shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_choose_profile_search_bar()
# show in the templates the column status (if profile is bound to a user or not)
shown_element_functions['show_status'] = True
# pass the status data (whether the profile is bound to a user or not) to the templates;
# it may be needed even when the column itself is not shown (e.g. in merge_profiles)
shown_element_functions['pass_status'] = True
# show search results to the user
body = body + self.search_box(search_param, shown_element_functions)
body = body + TEMPLATE.tmpl_choose_profile_footer()
title = _(' ')
return page(title=title,
metaheaderadd=choose_page.get_head().encode('utf-8'),
body=body,
req=req,
language=ln)
@staticmethod
def _arxiv_box(req, login_info, person_id, user_pid):
'''
Process and collect data for the arXiv box
@param req: Apache request object
@type req: Apache request object
@param login_info: status of the login in the following format: {'logged_in': True, 'uid': 2, 'logged_in_to_remote_systems': ['Arxiv', ...]}
@type login_info: dict
@param person_id: person id of the current page's profile
@type person_id: int
@param user_pid: person id of the user
@type user_pid: int
@return: data required to build the arXiv box
@rtype: dict
'''
session = get_session(req)
pinfo = session["personinfo"]
arxiv_data = dict()
# True if the user is not a guest and is connected through arXiv
arxiv_data['login'] = login_info['logged_in']
arxiv_data['user_pid'] = user_pid
arxiv_data['user_has_pid'] = user_pid != -1
# True if the profile the user is logged in with is the same as the profile of the page being viewed
arxiv_data['view_own_profile'] = user_pid == person_id
return arxiv_data
@staticmethod
def _orcid_box(arxiv_logged_in, person_id, user_pid, ulevel):
'''
Process and collect data for the orcid box
@param arxiv_logged_in: shows if the user is logged in through arXiv or not
@type arxiv_logged_in: boolean
@param person_id: person id of the current page's profile
@type person_id: int
@param user_pid: person id of the user
@type user_pid: int
@param ulevel: user's level
@type ulevel: string
@return: data required to build the orcid box
@rtype: dict
'''
orcid_data = dict()
orcid_data['arxiv_login'] = arxiv_logged_in
orcid_data['orcids'] = None
orcid_data['add_power'] = False
orcid_data['own_profile'] = False
orcid_data['pid'] = person_id
# Indicates whether we should push the works or not.
orcid_data['push'] = not get_token(person_id)
# if the profile the user is logged in with is the same as the profile of the page being viewed
if person_id == user_pid:
orcid_data['own_profile'] = True
# if the user is an admin then he can add an existing orcid to the profile
if ulevel == "admin":
orcid_data['add_power'] = True
orcids = webapi.get_orcids_by_pid(person_id)
if orcids:
orcid_data['orcids'] = orcids
return orcid_data
@staticmethod
def _autoclaim_papers_box(req, person_id, user_pid, remote_logged_in_systems):
'''
Process and collect data for the autoclaim box
@param req: Apache request object
@type req: Apache request object
@param person_id: person id of the current page's profile
@type person_id: int
@param user_pid: person id of the user
@type user_pid: int
@param remote_logged_in_systems: the remote logged in systems
@type remote_logged_in_systems: list
@return: data required to build the autoclaim box
@rtype: dict
'''
autoclaim_data = dict()
# if no autoclaim should occur, or one has already occurred and its results should be shown, the box remains hidden
autoclaim_data['hidden'] = True
autoclaim_data['person_id'] = person_id
# if the profile the user is logged in with is the same as the profile of the page being viewed
if person_id == user_pid:
recids_to_autoclaim = webapi.get_remote_login_systems_recids(req, remote_logged_in_systems)
autoclaim_data['hidden'] = False
autoclaim_data['num_of_claims'] = len(recids_to_autoclaim)
return autoclaim_data
@staticmethod
def get_params_to_check_login_info(session):
def get_params_to_check_login_info_of_arxiv(session):
try:
return session['user_info']
except KeyError:
return None
def get_params_to_check_login_info_of_orcid(session):
pinfo = session['personinfo']
try:
pinfo['orcid']['has_orcid_id'] = bool(
get_orcid_id_of_author(pinfo['pid'])[0][0] and pinfo['orcid']['import_pubs'])
except:
pinfo['orcid']['has_orcid_id'] = False
session.dirty = True
return pinfo['orcid']
get_params_for_remote_system = {'arXiv': get_params_to_check_login_info_of_arxiv,
'orcid': get_params_to_check_login_info_of_orcid}
params = dict()
for system, get_params in get_params_for_remote_system.iteritems():
params[system] = get_params(session)
return params
@staticmethod
def _claim_paper_box(person_id):
'''
Process and collect data for the claim paper box
@param person_id: person id of the current page's profile
@type person_id: int
@return: data required to build the claim paper box
@rtype: dict
'''
claim_paper_data = dict()
claim_paper_data['canonical_id'] = str(webapi.get_canonical_id_from_person_id(person_id))
return claim_paper_data
@staticmethod
def _support_box():
'''
Process and collect data for the support box
@return: data required to build the support box
@rtype: dict
'''
support_data = dict()
return support_data
@staticmethod
def _merge_box(person_id):
'''
Process and collect data for the merge box
@param person_id: person id of the current page's profile
@type person_id: int
@return: data required to build the merge box
@rtype: dict
'''
merge_data = dict()
search_param = webapi.get_canonical_id_from_person_id(person_id)
name_variants = [element[0] for element in webapi.get_person_names_from_id(person_id)]
mr_name = most_relevant_name(name_variants)
if mr_name:
search_param = mr_name.split(",")[0]
merge_data['search_param'] = search_param
merge_data['canonical_id'] = webapi.get_canonical_id_from_person_id(person_id)
return merge_data
@staticmethod
def _internal_ids_box(person_id, user_pid, ulevel):
'''
Process and collect data for the internal ids box
@param person_id: person id of the current page's profile
@type person_id: int
@param user_pid: person id of the user
@type user_pid: int
@param ulevel: user's permission level
@type ulevel: str
@return: data required to build the internal ids box
@rtype: dict
'''
internal_ids_data = dict()
internal_ids_data['uid'], internal_ids_data['old_uids'] = webapi.get_internal_user_id_from_person_id(person_id)
internal_ids_data['person_id'] = person_id
internal_ids_data['user_pid'] = user_pid
internal_ids_data['ulevel'] = ulevel
return internal_ids_data
@staticmethod
def _external_ids_box(person_id, user_pid, ulevel):
'''
Process and collect data for the external ids box
@param person_id: person id of the current page's profile
@type person_id: int
@param user_pid: person id of the user
@type user_pid: int
@param ulevel: user's permission level
@type ulevel: str
@return: data required to build the external ids box
@rtype: dict
'''
external_ids_data = dict()
external_ids_data['ext_ids'] = webapi.get_external_ids_from_person_id(person_id)
external_ids_data['person_id'] = person_id
external_ids_data['user_pid'] = user_pid
external_ids_data['ulevel'] = ulevel
return external_ids_data
@staticmethod
def _hepnames_box(person_id):
return webapi.get_hepnames(person_id)
def tickets_admin(self, req, form):
'''
Generate the page listing open RT tickets
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
ln = argd['ln']
webapi.session_bareinit(req)
no_access = self._page_access_permission_wall(req, req_level='admin')
if no_access:
return no_access
session = get_session(req)
pinfo = session['personinfo']
cname = ''
is_owner = False
last_visited_pid = webapi.history_get_last_visited_pid(pinfo['visit_diary'])
if last_visited_pid is not None:
cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
is_owner = self._is_profile_owner(last_visited_pid)
menu = WebProfileMenu(str(cname), "open_tickets", ln, is_owner, self._is_admin(pinfo))
title = "Open RT tickets"
profile_page = WebProfilePage("help", title, no_cache=True)
profile_page.add_profile_menu(menu)
tickets = webapi.get_persons_with_open_tickets_list()
tickets = [[clean_string(webapi.get_most_frequent_name_from_pid(int(t[0]))),
webapi.get_person_redirect_link(t[0]), t[0], t[1]]
for t in tickets]
content = TEMPLATE.tmpl_tickets_admin(tickets)
content = TEMPLATE.tmpl_person_detail_layout(content)
body = profile_page.get_wrapped_body("generic", {'html': content})
return page(title=title,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def help(self, req, form):
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
ln = argd['ln']
_ = gettext_set_language(ln)
if not CFG_BIBAUTHORID_ENABLED:
return page_not_authorized(req, text=_("This page is not accessible directly."))
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
cname = ''
is_owner = False
last_visited_pid = webapi.history_get_last_visited_pid(pinfo['visit_diary'])
if last_visited_pid is not None:
cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
is_owner = self._is_profile_owner(last_visited_pid)
title = "Help Center"
profile_page = WebProfilePage("help", title, no_cache=True)
template_parameters = {'base_url': CFG_BASE_URL}
body = profile_page.get_wrapped_body("help", template_parameters)
return page(title=title,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def export(self, req, form):
'''
Generate JSONized export of Person data
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
argd = wash_urlargd(
form,
{'ln': (str, CFG_SITE_LANG),
'request': (str, None),
'userid': (str, None)})
if not CFG_JSON_AVAILABLE:
return "500_json_not_found__install_package"
# session = get_session(req)
request = None
userid = None
if "userid" in argd and argd['userid']:
userid = argd['userid']
else:
return "404_user_not_found"
if "request" in argd and argd['request']:
request = argd["request"]
# find user from ID
user_email = get_email_from_username(userid)
if user_email == userid:
return "404_user_not_found"
uid = get_uid_from_email(user_email)
uinfo = collect_user_info(uid)
# find person by uid
pid = webapi.get_pid_from_uid(uid)
# find papers by pid that are confirmed by a human.
papers = webapi.get_papers_by_person_id(pid, 2)
# filter by request param, e.g. arxiv
if not request:
return "404__no_filter_selected"
if request not in VALID_EXPORT_FILTERS:
return "500_filter_invalid"
if request == "arxiv":
query = "(recid:"
query += " OR recid:".join(papers)
query += ") AND 037:arxiv"
db_docs = perform_request_search(p=query, rg=0)
nickmail = ""
nickname = ""
db_arxiv_ids = []
try:
nickname = uinfo["nickname"]
except KeyError:
pass
if not nickname:
try:
nickmail = uinfo["email"]
except KeyError:
nickmail = user_email
nickname = nickmail
db_arxiv_ids = get_fieldvalues(db_docs, "037__a")
construct = {"nickname": nickname,
"claims": ";".join(db_arxiv_ids)}
jsondmp = json.dumps(construct)
signature = webapi.sign_assertion("arXiv", jsondmp)
construct["digest"] = signature
return json.dumps(construct)
index = __call__
class WebInterfaceBibAuthorIDManageProfilePages(WebInterfaceDirectory):
_exports = ['',
'import_orcid_pubs',
'push_orcid_pubs',
'connect_author_with_hepname',
'connect_author_with_hepname_ajax',
'suggest_orcid',
'suggest_orcid_ajax']
def _lookup(self, component, path):
'''
This handler parses dynamic URLs:
- /author/profile/1332 shows the page of author with id: 1332
- /author/profile/100:5522,1431 shows the page of the author
identified by the bibrefrec: '100:5522,1431'
'''
if component not in self._exports:
return WebInterfaceBibAuthorIDManageProfilePages(component), path
def _is_profile_owner(self, pid):
return self.person_id == int(pid)
def _is_admin(self, pinfo):
return pinfo['ulevel'] == 'admin'
def __init__(self, identifier=None):
'''
Constructor of the web interface.
@param identifier: identifier of an author. Can be one of:
- an author id: e.g. "14"
- a canonical id: e.g. "J.R.Ellis.1"
- a bibrefrec: e.g. "100:1442,155"
@type identifier: str
'''
self.person_id = -1  # -1 is not a valid author identifier
if identifier is None or not isinstance(identifier, str):
self.original_identifier = str()
return
else:
self.original_identifier = identifier
# check if it's a canonical id: e.g. "J.R.Ellis.1"
try:
pid = int(identifier)
except ValueError:
pid = int(webapi.get_person_id_from_canonical_id(identifier))
if pid >= 0:
self.person_id = pid
return
# check if it's an author id: e.g. "14"
try:
pid = int(identifier)
if webapi.author_has_papers(pid):
self.person_id = pid
return
except ValueError:
pass
# check if it's a bibrefrec: e.g. "100:1442,155"
if webapi.is_valid_bibref(identifier):
pid = int(webapi.get_person_id_from_paper(identifier))
if pid >= 0:
self.person_id = pid
return
def _get_orcid_token(self, session, pinfo):
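'''Return the OAuth2 access token stored in the session, or None if it is missing or empty.'''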
if 'oauth2_access_token' not in session:
return None
token = session['oauth2_access_token']
if token != '':
return token
return None
def __call__(self, req, form):
'''
Generate SSO landing/author management page
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ulevel = pinfo['ulevel']
person_id = self.person_id
uid = getUid(req)
pinfo['claim_in_process'] = True
argd = wash_urlargd(form, {
'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0)})
debug = "verbose" in argd and argd["verbose"] > 0
ln = argd['ln']
_ = gettext_set_language(ln)
if not CFG_BIBAUTHORID_ENABLED or self.person_id is None:
return page_not_authorized(req, text=_("This page is not accessible directly."))
if person_id < 0:
return self._error_page(req, message=("Identifier %s is not a valid person identifier or does not exist anymore!" % self.original_identifier))
# log the visit
webapi.history_log_visit(req, 'manage_profile', pid=person_id)
# store the arxiv papers the user owns
if uid > 0 and not pinfo['arxiv_status']:
uinfo = collect_user_info(req)
arxiv_papers = list()
if 'external_arxivids' in uinfo and uinfo['external_arxivids']:
arxiv_papers = uinfo['external_arxivids'].split(';')
if arxiv_papers:
webapi.add_arxiv_papers_to_author(arxiv_papers, person_id)
pinfo['arxiv_status'] = True
params = WebInterfaceBibAuthorIDClaimPages.get_params_to_check_login_info(session)
login_info = webapi.get_login_info(uid, params)
# Create Wrapper Page Markup
cname = webapi.get_canonical_id_from_person_id(self.person_id)
long_name = webapi.get_longest_name_from_pid(self.person_id)
# TODO: Replace dash with —
page_title = "%s - %s" % (long_name, _('Manage Profile'))
menu = WebProfileMenu(
str(cname),
"manage_profile",
ln,
self._is_profile_owner(pinfo['pid']),
self._is_admin(pinfo))
profile_page = WebProfilePage("manage_profile", long_name, no_cache=True)
profile_page.add_profile_menu(menu)
profile_page.add_bootstrapped_data(json.dumps({
"backbone": """
(function(ticketbox) {
var app = ticketbox.app;
app.userops.set(%s);
app.bodyModel.set({userLevel: "%s"});
})(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel)
}))
if debug:
profile_page.add_debug_info(pinfo)
user_pid = webapi.get_user_pid(login_info['uid'])
person_data = webapi.get_person_info_by_pid(person_id)
arxiv_data = WebInterfaceBibAuthorIDClaimPages._arxiv_box(req, login_info, person_id, user_pid)
orcid_data = WebInterfaceBibAuthorIDClaimPages._orcid_box(arxiv_data['login'], person_id, user_pid, ulevel)
orcid_data['token'] = self._get_orcid_token(session, pinfo)
claim_paper_data = WebInterfaceBibAuthorIDClaimPages._claim_paper_box(person_id)
support_data = WebInterfaceBibAuthorIDClaimPages._support_box()
ids_box_html = None
if ulevel == 'admin':
ext_ids_data = WebInterfaceBibAuthorIDClaimPages._external_ids_box(person_id, user_pid, ulevel)
int_ids_data = WebInterfaceBibAuthorIDClaimPages._internal_ids_box(person_id, user_pid, ulevel)
ids_box_html = TEMPLATE.tmpl_ext_ids_box(
person_id,
int_ids_data,
ext_ids_data,
ln,
add_box=False,
loading=False)
autoclaim_data = WebInterfaceBibAuthorIDClaimPages._autoclaim_papers_box(
req, person_id, user_pid, login_info['logged_in_to_remote_systems'])
merge_data = WebInterfaceBibAuthorIDClaimPages._merge_box(person_id)
hepnames_data = WebInterfaceBibAuthorIDClaimPages._hepnames_box(person_id)
content = ''
# display status for any previously attempted merge
if pinfo['merge_info_message']:
teaser_key, message = pinfo['merge_info_message']
content += TEMPLATE.tmpl_merge_transaction_box(teaser_key, [message])
pinfo['merge_info_message'] = None
session.dirty = True
modal = ''
if 'orcid_info' in session:
orcid_info = session['orcid_info']['status']
else:
orcid_info = ''
if CFG_INSPIRE_SITE:
html_arxiv = TEMPLATE.tmpl_arxiv_box(arxiv_data, ln, add_box=False, loading=False)
html_orcid, modal = TEMPLATE.tmpl_orcid_box(orcid_data, ln, orcid_info, add_box=False, loading=False)
if hepnames_data is not None:
hepnames_data.update({
'cname': webapi.get_canonical_id_from_person_id(person_id),
'link_to_record': ulevel == "admin",
'hepnames_link': "%s/%s/" % (CFG_BASE_URL, "record"),
'new_record_link': 'http://slac.stanford.edu/spires/hepnames/additions.shtml',
'update_link': "http://inspirehep.net/person/update?IRN=",
'profile_link': "%s/%s" % (CFG_BASE_URL, "author/profile/")
})
html_hepnames = WebProfilePage.render_template('personal_details_box', hepnames_data)
else:
html_hepnames = "Loading.."
html_support = TEMPLATE.tmpl_support_box(support_data, ln, add_box=False, loading=False)
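# Autoclaim handling: when the box is visible, either reuse the result
# cached in the session from a previous run (unless a fresh ORCID import
# just happened), or build an autoclaim ticket with one 'assign' operation
# per paper found in the logged-in remote systems, commit it, and cache
# the outcome for later page loads.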
if autoclaim_data['hidden']:
autoclaim_successful_recs = None
autoclaim_unsuccessful_recs = None
else:
if not pinfo['orcid']['import_pubs'] and pinfo['autoclaim']['res'] is not None:
autoclaim_data = pinfo['autoclaim']['res']
autoclaim_successful_recs = autoclaim_data['successful_recids']
autoclaim_unsuccessful_recs = autoclaim_data['unsuccessful_recids']
else:
login_status = webapi.get_login_info(uid, params)
autoclaim_ticket = pinfo['autoclaim']['ticket']
external_pubs_association = pinfo['autoclaim']['external_pubs_association']
remote_systems = login_status['logged_in_to_remote_systems']
papers_to_autoclaim = set(webapi.get_papers_from_remote_systems(remote_systems,
params,
external_pubs_association))
for paper in papers_to_autoclaim:
operation_parts = {'pid': person_id,
'action': 'assign',
'bibrefrec': str(paper)}
operation_to_be_added = webapi.construct_operation(operation_parts,
pinfo,
uid)
if operation_to_be_added is None:
# In case the operation could not be created (because of an
# erroneous bibrefrec) ignore it and continue with the rest
continue
webapi.add_operation_to_ticket(operation_to_be_added, autoclaim_ticket)
additional_info = {'first_name': '', 'last_name': '', 'email': '',
'comments': 'Assigned automatically when autoclaim was triggered.'}
userinfo = webapi.fill_out_userinfo(additional_info, uid, req.remote_ip, ulevel, strict_check=False)
if 'email' in session:
userinfo['email'] = session['email']
elif 'email' not in userinfo:
userinfo['email'] = None
webapi.commit_operations_from_ticket(autoclaim_ticket, userinfo, uid, ulevel)
already_claimed_recids = set(
[rec for _, _, rec in get_claimed_papers_of_author(person_id)]) & papers_to_autoclaim
successful_recids = set([op['rec'] for op in webapi.get_ticket_status(
autoclaim_ticket) if 'execution_result' in op]) | already_claimed_recids
webapi.clean_ticket(autoclaim_ticket)
unsuccessful_recids = [op['rec'] for op in webapi.get_ticket_status(autoclaim_ticket)]
autoclaim_data['recids_to_external_ids'] = dict()
for key, value in external_pubs_association.iteritems():
ext_system, ext_id = key
rec = value
title = get_title_of_paper(rec)
autoclaim_data['recids_to_external_ids'][rec] = title
autoclaim_successful_recs = [(
autoclaim_data['recids_to_external_ids'][recid],
get_inspire_record_url(recid),
recid) for recid in successful_recids]
autoclaim_unsuccessful_recs = [(
autoclaim_data['recids_to_external_ids'][recid],
get_inspire_record_url(recid),
recid) for recid in unsuccessful_recids]
# cache the result in the session
autoclaim_data['successful_recids'] = autoclaim_successful_recs
autoclaim_data['unsuccessful_recids'] = autoclaim_unsuccessful_recs
pinfo['autoclaim']['res'] = autoclaim_data
if pinfo['orcid']['import_pubs']:
pinfo['orcid']['import_pubs'] = False
session.dirty = True
template_parameters = {
"autoclaim_successful_recids": autoclaim_successful_recs,
"autoclaim_unsuccessful_recids": autoclaim_unsuccessful_recs,
"review_autoclaim_link": "%s/author/ticket/review_autoclaim" % CFG_SITE_URL,
"merge": TEMPLATE.tmpl_merge_box(merge_data, ln, add_box=False, loading=False),
"external_ids_box_html": ids_box_html,
"user_level": ulevel,
"base_url": CFG_BASE_URL,
"inspire" : CFG_INSPIRE_SITE,
"orcid_message" : self._generate_orcid_message(req, ln)
}
if 'orcid_info' in session:
session.pop('orcid_info', None)
session.dirty = True
# Inspire specific endpoints.
if CFG_INSPIRE_SITE:
template_parameters["hepnames"] = html_hepnames
template_parameters["arxiv"] = html_arxiv
template_parameters["orcid"] = html_orcid
template_parameters["contact"] = html_support
template_parameters["modal"] = modal
body = profile_page.get_wrapped_body("manage_profile", template_parameters)
# body = profile_page.get_wrapped_body("generic", {'html': content})
return page(title=page_title,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def _generate_orcid_message(self, req, ln):
'''
Generate the box which informs the user about running ORCID push.
@param req: Apache request object
@type req: Apache request object
'''
session = get_session(req)
orcid_info = None
if 'orcid_info' in session:
orcid_info = session['orcid_info']['status']
if not orcid_info:
return ''
else:
return TEMPLATE.tmpl_orcid_message(orcid_info, ln)
def import_orcid_pubs(self, req, form):
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
orcid_info = pinfo['orcid']
orcid_id, orcid_dois = get_dois_from_orcid_using_pid(pinfo['pid'])
# TODO: what to do in case some ORCID server error occurs?
if orcid_id is None or orcid_dois is None:
redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_SECURE_URL, pinfo['pid']))
# TODO: it would be smarter to:
# 1. save the orcid_dois in the db
# 2. expire only the external pubs box in the profile page
webauthorapi.expire_all_cache_for_personid(pinfo['pid'])
orcid_info['imported_pubs'] = orcid_dois
orcid_info['import_pubs'] = True
session.dirty = True
redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_SECURE_URL, pinfo['pid']))
def _get_identifier_from_path(self, path):
'''Return identifier from path to manage_profile page.
Example: localhost:4000/author/manage_profile/273672/wowow -> 273672
'''
tokens = path.split('/')
return tokens[tokens.index('manage_profile') + 1]
def push_orcid_pubs(self, req, form):
'''Push all claimed papers to the ORCID database.
Papers that are already present in ORCID are not pushed again. Requires
user authentication. When a user requests a push, this method runs twice:
first the user authenticates, then, on the second run, after the token has
been received from ORCID, the push is performed.
'''
webapi.session_bareinit(req)
session = get_session(req)
if 'orcid_pid' not in session:
# I can't assume that pid will be available in session
identifier = self._get_identifier_from_path(req.referer)
try:
session['orcid_pid'] = get_author_by_canonical_name(identifier)[0][0]
except Exception:
session['orcid_pid'] = identifier
session.dirty = True
if 'oauth2_access_token' not in session:
session['oauth2_access_token'] = ''
if session['oauth2_access_token'] == '':
# Authenticate
session['pushorcid'] = True
session.dirty = True
redirect_to_url(req, "%s/youraccount/oauth2?provider=%s&scope=/orcid-works/update+/orcid-works/create" % (CFG_SITE_SECURE_URL, 'orcid'))
# We expect the user to have only one ORCID
assert(len(webapi.get_orcids_by_pid(session['orcid_pid'])) == 1)
if session['oauth2_orcid'] != webapi.get_orcids_by_pid(session['orcid_pid'])[0]:
# The user has authenticated, but with a different ORCID account
session['oauth2_access_token'] = ''
session['orcid_info'] = {'status': 'wrong_account'}
person_id = session.pop('orcid_pid')
session.dirty = True
redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_SECURE_URL, person_id))
set_token(session['orcid_pid'], session['oauth2_access_token'])
session['orcid_info'] = {'status': 'finished'}
# Token may expire. It is better to get rid of it.
session['oauth2_access_token'] = ''
person_id = session.pop('orcid_pid')
session.dirty = True
redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_SECURE_URL, person_id))
def connect_author_with_hepname(self, req, form):
argd = wash_urlargd(form, {'cname': (str, None),
'hepname': (str, None),
'ln': (str, CFG_SITE_LANG)})
ln = argd['ln']
if argd['cname'] is not None:
cname = argd['cname']
else:
return self._error_page(req, ln, "Fatal: cannot associate a hepname without a person id.")
if argd['hepname'] is not None:
hepname = argd['hepname']
else:
return self._error_page(req, ln, "Fatal: cannot associate an author with a non valid hepname.")
webapi.session_bareinit(req)
session = get_session(req)
webapi.connect_author_with_hepname(cname, hepname, session['uid'])
pinfo = session['personinfo']
last_visited_page = webapi.history_get_last_visited_url(pinfo['visit_diary'], just_page=True)
redirect_to_url(req, "%s/author/%s/%s" % (CFG_SITE_URL, last_visited_page, cname))
def connect_author_with_hepname_ajax(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if 'jsondata' not in form:
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
cname = json_data['cname']
hepname = json_data['hepname']
except KeyError:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
if not self._is_admin(pinfo):
if 'email' in json_data:
pinfo['form_email'] = json_data['email']
webapi.connect_author_with_hepname(cname, hepname,
session['uid'],
email=json_data['email'])
else:
webapi.connect_author_with_hepname(cname, hepname,
session['uid'])
else:
uid = getUid(req)
add_cname_to_hepname_record({cname: hepname}, uid)
def suggest_orcid(self, req, form):
argd = wash_urlargd(form, {'orcid': (str, None),
'pid': (int, -1),
'ln': (str, CFG_SITE_LANG)})
ln = argd['ln']
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln, "Fatal: cannot associate an orcid without a person id.")
if argd['orcid'] is not None and is_valid_orcid(argd['orcid']):
orcid = argd['orcid']
else:
return self._error_page(req, ln, "Fatal: cannot associate an author with a non valid ORCID.")
session = get_session(req)
webapi.connect_author_with_orcid(webapi.get_canonical_id_from_person_id(pid), orcid, session['uid'])
redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, pid))
def suggest_orcid_ajax(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if 'jsondata' not in form:
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This module is also sponsored by E.T.A.I. (www.etai.fr)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vmware_guest
short_description: Manages virtual machines in vCenter
description: >
This module can be used to create new virtual machines from templates or other virtual machines,
manage power state of virtual machine such as power on, power off, suspend, shutdown, reboot, restart etc.,
modify various virtual machine components like network, disk, customization etc.,
rename a virtual machine and remove a virtual machine with associated components.
version_added: '2.2'
author:
- Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
- Philippe Dellaert (@pdellaert) <philippe@dellaert.org>
- Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com>
requirements:
- python >= 2.6
- PyVmomi
notes:
- Please make sure that the user used for vmware_guest has the correct level of privileges.
- For example, following is the list of minimum privileges required by users to create virtual machines.
- " DataStore > Allocate Space"
- " Virtual Machine > Configuration > Add New Disk"
- " Virtual Machine > Configuration > Add or Remove Device"
- " Virtual Machine > Inventory > Create New"
- " Network > Assign Network"
- " Resource > Assign Virtual Machine to Resource Pool"
- "Module may require additional privileges as well, which may be required for gathering facts - e.g. ESXi configurations."
- Tested on vSphere 5.5, 6.0, 6.5 and 6.7
- Use SCSI disks instead of IDE when you want to expand online disks by specifying a SCSI controller
- "For additional information please visit Ansible VMware community wiki - U(https://github.com/ansible/community/wiki/VMware)."
options:
state:
description:
- Specify the state the virtual machine should be in.
- 'If C(state) is set to C(present) and virtual machine exists, ensure the virtual machine
configurations conforms to task arguments.'
- 'If C(state) is set to C(absent) and virtual machine exists, then the specified virtual machine
is removed with its associated components.'
- 'If C(state) is set to one of the following C(poweredon), C(poweredoff), C(present), C(restarted), C(suspended)
and virtual machine does not exist, then virtual machine is deployed with given parameters.'
- 'If C(state) is set to C(poweredon) and virtual machine exists with powerstate other than powered on,
then the specified virtual machine is powered on.'
- 'If C(state) is set to C(poweredoff) and virtual machine exists with powerstate other than powered off,
then the specified virtual machine is powered off.'
- 'If C(state) is set to C(restarted) and virtual machine exists, then the virtual machine is restarted.'
- 'If C(state) is set to C(suspended) and virtual machine exists, then the virtual machine is set to suspended mode.'
- 'If C(state) is set to C(shutdownguest) and virtual machine exists, then the virtual machine is shutdown.'
- 'If C(state) is set to C(rebootguest) and virtual machine exists, then the virtual machine is rebooted.'
default: present
choices: [ present, absent, poweredon, poweredoff, restarted, suspended, shutdownguest, rebootguest ]
name:
description:
- Name of the virtual machine to work with.
- Virtual machine names in vCenter are not necessarily unique, which may be problematic, see C(name_match).
- 'If multiple virtual machines with same name exists, then C(folder) is required parameter to
identify uniqueness of the virtual machine.'
- This parameter is required, if C(state) is set to C(poweredon), C(poweredoff), C(present), C(restarted), C(suspended)
and virtual machine does not exist.
- This parameter is case sensitive.
required: yes
name_match:
description:
- If multiple virtual machines matching the name, use the first or last found.
default: 'first'
choices: [ first, last ]
uuid:
description:
- UUID of the virtual machine to manage if known, this is VMware's unique identifier.
- This is required if C(name) is not supplied.
- If virtual machine does not exist, then this parameter is ignored.
- Please note that a supplied UUID will be ignored on virtual machine creation, as VMware creates the UUID internally.
use_instance_uuid:
description:
- Whether to use the VMware instance UUID rather than the BIOS UUID.
default: no
type: bool
version_added: '2.8'
template:
description:
- Template or existing virtual machine used to create new virtual machine.
- If this value is not set, virtual machine is created without using a template.
- If the virtual machine already exists, this parameter will be ignored.
- This parameter is case sensitive.
- You can also specify template or VM UUID for identifying source. version_added 2.8. Use C(hw_product_uuid) from M(vmware_guest_facts) as UUID value.
- From version 2.8 onwards, absolute path to virtual machine or template can be used.
aliases: [ 'template_src' ]
is_template:
description:
- Flag the instance as a template.
- This will mark the given virtual machine as template.
default: 'no'
type: bool
version_added: '2.3'
folder:
description:
- Destination folder, absolute path to find an existing guest or create the new guest.
- The folder should include the datacenter. ESX's datacenter is ha-datacenter.
- This parameter is case sensitive.
- This parameter is required, while deploying new virtual machine. version_added 2.5.
- 'If multiple machines are found with same name, this parameter is used to identify
uniqueness of the virtual machine. version_added 2.5'
- 'Examples:'
- ' folder: /ha-datacenter/vm'
- ' folder: ha-datacenter/vm'
- ' folder: /datacenter1/vm'
- ' folder: datacenter1/vm'
- ' folder: /datacenter1/vm/folder1'
- ' folder: datacenter1/vm/folder1'
- ' folder: /folder1/datacenter1/vm'
- ' folder: folder1/datacenter1/vm'
- ' folder: /folder1/datacenter1/vm/folder2'
hardware:
description:
- Manage virtual machine's hardware attributes.
- All parameters are case sensitive.
- 'Valid attributes are:'
- ' - C(hotadd_cpu) (boolean): Allow virtual CPUs to be added while the virtual machine is running.'
- ' - C(hotremove_cpu) (boolean): Allow virtual CPUs to be removed while the virtual machine is running.
version_added: 2.5'
- ' - C(hotadd_memory) (boolean): Allow memory to be added while the virtual machine is running.'
- ' - C(memory_mb) (integer): Amount of memory in MB.'
- ' - C(nested_virt) (bool): Enable nested virtualization. version_added: 2.5'
- ' - C(num_cpus) (integer): Number of CPUs.'
- ' - C(num_cpu_cores_per_socket) (integer): Number of Cores Per Socket.'
- " C(num_cpus) must be a multiple of C(num_cpu_cores_per_socket).
For example to create a VM with 2 sockets of 4 cores, specify C(num_cpus): 8 and C(num_cpu_cores_per_socket): 4"
- ' - C(scsi) (string): Valid values are C(buslogic), C(lsilogic), C(lsilogicsas) and C(paravirtual) (default).'
- " - C(memory_reservation_lock) (boolean): If set true, memory resource reservation for the virtual machine
will always be equal to the virtual machine's memory size. version_added: 2.5"
- ' - C(max_connections) (integer): Maximum number of active remote display connections for the virtual machines.
version_added: 2.5.'
- ' - C(mem_limit) (integer): The memory utilization of a virtual machine will not exceed this limit. Unit is MB.
version_added: 2.5'
- ' - C(mem_reservation) (integer): The amount of memory resource that is guaranteed available to the virtual
machine. Unit is MB. C(memory_reservation) is alias to this. version_added: 2.5'
- ' - C(cpu_limit) (integer): The CPU utilization of a virtual machine will not exceed this limit. Unit is MHz.
version_added: 2.5'
- ' - C(cpu_reservation) (integer): The amount of CPU resource that is guaranteed available to the virtual machine.
Unit is MHz. version_added: 2.5'
- ' - C(version) (integer): The Virtual machine hardware versions. Default is 10 (ESXi 5.5 and onwards).
Please check VMware documentation for correct virtual machine hardware version.
Incorrect hardware version may lead to failure in deployment. If hardware version is already equal to the given
version then no action is taken. version_added: 2.6'
- ' - C(boot_firmware) (string): Choose which firmware should be used to boot the virtual machine.
Allowed values are "bios" and "efi". version_added: 2.7'
- ' - C(virt_based_security) (bool): Enable Virtualization Based Security feature for Windows 10.
(Support from Virtual machine hardware version 14, Guest OS Windows 10 64 bit, Windows Server 2016)'
guest_id:
description:
- Set the guest ID.
- This parameter is case sensitive.
- 'Examples:'
- " virtual machine with RHEL7 64 bit, will be 'rhel7_64Guest'"
- " virtual machine with CentOS 64 bit, will be 'centos64Guest'"
- " virtual machine with Ubuntu 64 bit, will be 'ubuntu64Guest'"
- This field is required when creating a virtual machine, not required when creating from the template.
- >
Valid values are referenced here:
U(https://code.vmware.com/apis/358/doc/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html)
version_added: '2.3'
disk:
description:
- A list of disks to add.
- This parameter is case sensitive.
- Shrinking disks is not supported.
- Removing existing disks of the virtual machine is not supported.
- 'Valid attributes are:'
- ' - C(size_[tb,gb,mb,kb]) (integer): Disk storage size in specified unit.'
- ' - C(type) (string): Valid values are:'
- ' - C(thin) thin disk'
- ' - C(eagerzeroedthick) eagerzeroedthick disk, added in version 2.5'
- ' Default: C(None) thick disk, no eagerzero.'
- ' - C(datastore) (string): The name of datastore which will be used for the disk. If C(autoselect_datastore) is set to True,
then the least used datastore whose name contains this "disk.datastore" string will be selected.'
- ' - C(filename) (string): Existing disk image to be used. Filename must already exist on the datastore.'
- ' Specify filename string in C([datastore_name] path/to/file.vmdk) format. Added in version 2.8.'
- ' - C(autoselect_datastore) (bool): Select the least used datastore. "disk.datastore" and "disk.autoselect_datastore"
will not be used if C(datastore) is specified outside this C(disk) configuration.'
- ' - C(disk_mode) (string): Type of disk mode. Added in version 2.6'
- ' - Available options are :'
- ' - C(persistent): Changes are immediately and permanently written to the virtual disk. This is default.'
- ' - C(independent_persistent): Same as persistent, but not affected by snapshots.'
- ' - C(independent_nonpersistent): Changes to virtual disk are made to a redo log and discarded at power off, but not affected by snapshots.'
cdrom:
description:
- A CD-ROM configuration for the virtual machine.
- Or a list of CD-ROMs configuration for the virtual machine. Added in version 2.9.
- 'Parameters C(controller_type), C(controller_number), C(unit_number), C(state) are added for a list of CD-ROMs
configuration support.'
- 'Valid attributes are:'
- ' - C(type) (string): The type of CD-ROM, valid options are C(none), C(client) or C(iso). With C(none) the CD-ROM
will be disconnected but present.'
- ' - C(iso_path) (string): The datastore path to the ISO file to use, in the form of C([datastore1] path/to/file.iso).
Required if type is set C(iso).'
- ' - C(controller_type) (string): Default value is C(ide). Only C(ide) controller type for CD-ROM is supported for
now, will add SATA controller type in the future.'
- ' - C(controller_number) (int): For C(ide) controller, valid value is 0 or 1.'
- ' - C(unit_number) (int): For CD-ROM device attach to C(ide) controller, valid value is 0 or 1.
C(controller_number) and C(unit_number) are mandatory attributes.'
- ' - C(state) (string): Valid value is C(present) or C(absent). Default is C(present). If set to C(absent), then
the specified CD-ROM will be removed. For C(ide) controller, hot-add or hot-remove CD-ROM is not supported.'
version_added: '2.5'
resource_pool:
description:
- Use the given resource pool for virtual machine operation.
- This parameter is case sensitive.
- Resource pool should be a child of the selected host parent.
version_added: '2.3'
wait_for_ip_address:
description:
- Wait until vCenter detects an IP address for the virtual machine.
- This requires vmware-tools (vmtoolsd) to properly work after creation.
- "vmware-tools needs to be installed on the given virtual machine in order to work with this parameter."
default: 'no'
type: bool
wait_for_customization:
description:
- Wait until vCenter detects all guest customizations as successfully completed.
- When enabled, the VM will automatically be powered on.
default: 'no'
type: bool
version_added: '2.8'
state_change_timeout:
description:
- If the C(state) is set to C(shutdownguest), by default the module will return immediately after sending the shutdown signal.
- If this argument is set to a positive integer, the module will instead wait for the virtual machine to reach the poweredoff state.
- The value sets a timeout in seconds for the module to wait for the state change.
default: 0
version_added: '2.6'
snapshot_src:
description:
- Name of the existing snapshot to use to create a clone of a virtual machine.
- This parameter is case sensitive.
- While creating linked clone using C(linked_clone) parameter, this parameter is required.
version_added: '2.4'
linked_clone:
description:
- Whether to create a linked clone from the snapshot specified.
- If specified, then C(snapshot_src) is required parameter.
default: 'no'
type: bool
version_added: '2.4'
force:
description:
- Ignore warnings and complete the actions.
- This parameter is useful while removing a virtual machine which is in powered on state.
- 'This module reflects the VMware vCenter API and UI workflow, as such, in some cases the `force` flag will
be mandatory to perform the action to ensure you are certain the action has to be taken, no matter what the consequence.
This is specifically the case for removing a powered-on virtual machine when C(state) is set to C(absent).'
default: 'no'
type: bool
datacenter:
description:
- Destination datacenter for the deploy operation.
- This parameter is case sensitive.
default: ha-datacenter
cluster:
description:
- The cluster name where the virtual machine will run.
- This is a required parameter, if C(esxi_hostname) is not set.
- C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
- This parameter is case sensitive.
version_added: '2.3'
esxi_hostname:
description:
- The ESXi hostname where the virtual machine will run.
- This is a required parameter, if C(cluster) is not set.
- C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
- This parameter is case sensitive.
annotation:
description:
- A note or annotation to include in the virtual machine.
version_added: '2.3'
customvalues:
description:
- Define a list of custom values to set on virtual machine.
- A custom value object takes two fields C(key) and C(value).
- Incorrect key and values will be ignored.
version_added: '2.3'
networks:
description:
- A list of networks (in the order of the NICs).
- Removing NICs is not allowed while reconfiguring the virtual machine.
- All parameters and VMware object names are case sensitive.
- 'One of the below parameters is required per entry:'
- ' - C(name) (string): Name of the portgroup or distributed virtual portgroup for this interface.
When specifying distributed virtual portgroup make sure given C(esxi_hostname) or C(cluster) is associated with it.'
- ' - C(vlan) (integer): VLAN number for this interface.'
- 'Optional parameters per entry (used for virtual hardware):'
- ' - C(device_type) (string): Virtual network device (one of C(e1000), C(e1000e), C(pcnet32), C(vmxnet2), C(vmxnet3) (default), C(sriov)).'
- ' - C(mac) (string): Customize MAC address.'
- ' - C(dvswitch_name) (string): Name of the distributed vSwitch.
This value is required if multiple distributed portgroups exists with the same name. version_added 2.7'
- ' - C(start_connected) (bool): Indicates that virtual network adapter starts with associated virtual machine powers on. version_added: 2.5'
- 'Optional parameters per entry (used for OS customization):'
- ' - C(type) (string): Type of IP assignment (either C(dhcp) or C(static)). C(dhcp) is default.'
- ' - C(ip) (string): Static IP address (implies C(type: static)).'
- ' - C(netmask) (string): Static netmask required for C(ip).'
- ' - C(gateway) (string): Static gateway.'
- ' - C(dns_servers) (string): DNS servers for this network interface (Windows).'
- ' - C(domain) (string): Domain name for this network interface (Windows).'
- ' - C(wake_on_lan) (bool): Indicates if wake-on-LAN is enabled on this virtual network adapter. version_added: 2.5'
- ' - C(allow_guest_control) (bool): Enables guest control over whether the connectable device is connected. version_added: 2.5'
version_added: '2.3'
customization:
description:
- Parameters for OS customization when cloning from the template or the virtual machine, or apply to the existing virtual machine directly.
- Not all operating systems are supported for customization with respective vCenter version,
please check VMware documentation for respective OS customization.
- For supported customization operating system matrix, (see U(http://partnerweb.vmware.com/programs/guestOS/guest-os-customization-matrix.pdf))
- All parameters and VMware object names are case sensitive.
- Linux based OSes requires Perl package to be installed for OS customizations.
- 'Common parameters (Linux/Windows):'
- ' - C(existing_vm) (bool): If set to C(True), do OS customization on the specified virtual machine directly.
If set to C(False) or not specified, do OS customization when cloning from the template or the virtual machine. version_added: 2.8'
- ' - C(dns_servers) (list): List of DNS servers to configure.'
- ' - C(dns_suffix) (list): List of domain suffixes, also known as DNS search path (default: C(domain) parameter).'
- ' - C(domain) (string): DNS domain name to use.'
- ' - C(hostname) (string): Computer hostname (default: shortened C(name) parameter). Allowed characters are alphanumeric (uppercase and lowercase)
and minus, rest of the characters are dropped as per RFC 952.'
- 'Parameters related to Linux customization:'
- ' - C(timezone) (string): Timezone (See List of supported time zones for different vSphere versions in Linux/Unix
systems (2145518) U(https://kb.vmware.com/s/article/2145518)). version_added: 2.9'
- ' - C(hwclockUTC) (bool): Specifies whether the hardware clock is in UTC or local time.
True when the hardware clock is in UTC, False when the hardware clock is in local time. version_added: 2.9'
- 'Parameters related to Windows customization:'
- ' - C(autologon) (bool): Auto logon after virtual machine customization (default: False).'
- ' - C(autologoncount) (int): Number of autologon after reboot (default: 1).'
- ' - C(domainadmin) (string): User used to join in AD domain (mandatory with C(joindomain)).'
- ' - C(domainadminpassword) (string): Password used to join in AD domain (mandatory with C(joindomain)).'
- ' - C(fullname) (string): Server owner name (default: Administrator).'
- ' - C(joindomain) (string): AD domain to join (Not compatible with C(joinworkgroup)).'
- ' - C(joinworkgroup) (string): Workgroup to join (Not compatible with C(joindomain), default: WORKGROUP).'
- ' - C(orgname) (string): Organisation name (default: ACME).'
- ' - C(password) (string): Local administrator password.'
- ' - C(productid) (string): Product ID.'
- ' - C(runonce) (list): List of commands to run at first user logon.'
- ' - C(timezone) (int): Timezone (See U(https://msdn.microsoft.com/en-us/library/ms912391.aspx)).'
version_added: '2.3'
vapp_properties:
description:
- A list of vApp properties
- 'For full list of attributes and types refer to: U(https://github.com/vmware/pyvmomi/blob/master/docs/vim/vApp/PropertyInfo.rst)'
- 'Basic attributes are:'
- ' - C(id) (string): Property id - required.'
- ' - C(value) (string): Property value.'
- ' - C(type) (string): Value type, string type by default.'
- ' - C(operation): C(remove): This attribute is required only when removing properties.'
version_added: '2.6'
customization_spec:
description:
- Unique name identifying the requested customization specification.
- This parameter is case sensitive.
- If set, then overrides C(customization) parameter values.
version_added: '2.6'
datastore:
description:
- Specify datastore or datastore cluster to provision virtual machine.
- 'This parameter takes precedence over "disk.datastore" parameter.'
- 'This parameter can be used to override datastore or datastore cluster setting of the virtual machine when deployed
from the template.'
- Please see example for more usage.
version_added: '2.7'
convert:
description:
- Specify convert disk type while cloning template or virtual machine.
choices: [ thin, thick, eagerzeroedthick ]
version_added: '2.8'
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Create a virtual machine on given ESXi hostname
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
folder: /DC1/vm/
name: test_vm_0001
state: poweredon
guest_id: centos64Guest
# This is hostname of particular ESXi server on which user wants VM to be deployed
esxi_hostname: "{{ esxi_hostname }}"
disk:
- size_gb: 10
type: thin
datastore: datastore1
hardware:
memory_mb: 512
num_cpus: 4
scsi: paravirtual
networks:
- name: VM Network
mac: aa:bb:dd:aa:00:14
ip: 10.10.10.100
netmask: 255.255.255.0
device_type: vmxnet3
wait_for_ip_address: yes
delegate_to: localhost
register: deploy_vm
- name: Create a virtual machine from a template
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
folder: /testvms
name: testvm_2
state: poweredon
template: template_el7
disk:
- size_gb: 10
type: thin
datastore: g73_datastore
hardware:
memory_mb: 512
num_cpus: 6
num_cpu_cores_per_socket: 3
scsi: paravirtual
memory_reservation_lock: True
mem_limit: 8096
mem_reservation: 4096
cpu_limit: 8096
cpu_reservation: 4096
max_connections: 5
hotadd_cpu: True
hotremove_cpu: True
hotadd_memory: False
version: 12 # Hardware version of virtual machine
boot_firmware: "efi"
cdrom:
type: iso
iso_path: "[datastore1] livecd.iso"
networks:
- name: VM Network
mac: aa:bb:dd:aa:00:14
wait_for_ip_address: yes
delegate_to: localhost
register: deploy
- name: Clone a virtual machine from Windows template and customize
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
datacenter: datacenter1
cluster: cluster
name: testvm-2
template: template_windows
networks:
- name: VM Network
ip: 192.168.1.100
netmask: 255.255.255.0
gateway: 192.168.1.1
mac: aa:bb:dd:aa:00:14
domain: my_domain
dns_servers:
- 192.168.1.1
- 192.168.1.2
- vlan: 1234
type: dhcp
customization:
autologon: yes
dns_servers:
- 192.168.1.1
- 192.168.1.2
domain: my_domain
password: new_vm_password
runonce:
- powershell.exe -ExecutionPolicy Unrestricted -File C:\Windows\Temp\ConfigureRemotingForAnsible.ps1 -ForceNewSSLCert -EnableCredSSP
delegate_to: localhost
- name: Clone a virtual machine from Linux template and customize
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
datacenter: "{{ datacenter }}"
state: present
folder: /DC1/vm
template: "{{ template }}"
name: "{{ vm_name }}"
cluster: DC1_C1
networks:
- name: VM Network
ip: 192.168.10.11
netmask: 255.255.255.0
wait_for_ip_address: True
customization:
domain: "{{ guest_domain }}"
dns_servers:
- 8.9.9.9
- 7.8.8.9
dns_suffix:
- example.com
- example2.com
delegate_to: localhost
- name: Rename a virtual machine (requires the virtual machine's uuid)
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
uuid: "{{ vm_uuid }}"
name: new_name
state: present
delegate_to: localhost
- name: Remove a virtual machine by uuid
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
uuid: "{{ vm_uuid }}"
state: absent
delegate_to: localhost
- name: Manipulate vApp properties
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
name: vm_name
state: present
vapp_properties:
- id: remoteIP
category: Backup
label: Backup server IP
type: str
value: 10.10.10.1
- id: old_property
operation: remove
delegate_to: localhost
- name: Set powerstate of a virtual machine to poweroff by using UUID
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
uuid: "{{ vm_uuid }}"
state: poweredoff
delegate_to: localhost
- name: Deploy a virtual machine in a datastore different from the datastore of the template
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
name: "{{ vm_name }}"
state: present
template: "{{ template_name }}"
# Here datastore can be different which holds template
datastore: "{{ virtual_machine_datastore }}"
hardware:
memory_mb: 512
num_cpus: 2
scsi: paravirtual
delegate_to: localhost
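# The two tasks below are illustrative sketches only (hostnames, VM names, datastore
# paths and custom attribute names are placeholders); they show the list-form cdrom
# configuration described above (added in version 2.9) and the customvalues option,
# which are not covered by the preceding examples.
- name: Attach two CD-ROM devices to an existing virtual machine (list form)
  vmware_guest:
    hostname: "{{ vcenter_hostname }}"
    username: "{{ vcenter_username }}"
    password: "{{ vcenter_password }}"
    validate_certs: no
    name: testvm_2
    state: present
    cdrom:
      - controller_type: ide
        controller_number: 0
        unit_number: 0
        type: iso
        iso_path: "[datastore1] livecd.iso"
        state: present
      - controller_type: ide
        controller_number: 0
        unit_number: 1
        type: client
        state: present
  delegate_to: localhost
- name: Set custom values on a virtual machine
  vmware_guest:
    hostname: "{{ vcenter_hostname }}"
    username: "{{ vcenter_username }}"
    password: "{{ vcenter_password }}"
    validate_certs: no
    name: testvm_2
    state: present
    customvalues:
      - key: backup_policy
        value: daily
  delegate_to: localhost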
'''
RETURN = r'''
instance:
description: metadata about the new virtual machine
returned: always
type: dict
sample: None
'''
import re
import time
import string
HAS_PYVMOMI = False
try:
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
pass
from random import randint
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.network import is_mac
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.vmware import (find_obj, gather_vm_facts, get_all_objs,
compile_folder_path_for_object, serialize_spec,
vmware_argument_spec, set_vm_power_state, PyVmomi,
find_dvs_by_name, find_dvspg_by_name, wait_for_vm_ip,
wait_for_task, TaskError)
def list_or_dict(value):
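"""Validate that a module argument is either a list or a dict; raise ValueError otherwise."""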
if isinstance(value, (list, dict)):
return value
else:
raise ValueError("'%s' is not valid, valid type is 'list' or 'dict'." % value)
class PyVmomiDeviceHelper(object):
""" This class is a helper to create easily VMware Objects for PyVmomiHelper """
def __init__(self, module):
self.module = module
self.next_disk_unit_number = 0
self.scsi_device_type = {
'lsilogic': vim.vm.device.VirtualLsiLogicController,
'paravirtual': vim.vm.device.ParaVirtualSCSIController,
'buslogic': vim.vm.device.VirtualBusLogicController,
'lsilogicsas': vim.vm.device.VirtualLsiLogicSASController,
}
def create_scsi_controller(self, scsi_type):
scsi_ctl = vim.vm.device.VirtualDeviceSpec()
scsi_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
scsi_device = self.scsi_device_type.get(scsi_type, vim.vm.device.ParaVirtualSCSIController)
scsi_ctl.device = scsi_device()
scsi_ctl.device.busNumber = 0
# While creating a new SCSI controller, temporary key value
# should be unique negative integers
scsi_ctl.device.key = -randint(1000, 9999)
scsi_ctl.device.hotAddRemove = True
scsi_ctl.device.sharedBus = 'noSharing'
scsi_ctl.device.scsiCtlrUnitNumber = 7
return scsi_ctl
def is_scsi_controller(self, device):
return isinstance(device, tuple(self.scsi_device_type.values()))
@staticmethod
def create_ide_controller(bus_number=0):
ide_ctl = vim.vm.device.VirtualDeviceSpec()
ide_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
ide_ctl.device = vim.vm.device.VirtualIDEController()
ide_ctl.device.deviceInfo = vim.Description()
# While creating a new IDE controller, temporary key value
# should be unique negative integers
ide_ctl.device.key = -randint(200, 299)
ide_ctl.device.busNumber = bus_number
return ide_ctl
@staticmethod
def create_cdrom(ide_device, cdrom_type, iso_path=None, unit_number=0):
cdrom_spec = vim.vm.device.VirtualDeviceSpec()
cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
cdrom_spec.device = vim.vm.device.VirtualCdrom()
cdrom_spec.device.controllerKey = ide_device.key
cdrom_spec.device.key = -randint(3000, 3999)
cdrom_spec.device.unitNumber = unit_number
cdrom_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
cdrom_spec.device.connectable.allowGuestControl = True
cdrom_spec.device.connectable.startConnected = (cdrom_type != "none")
if cdrom_type in ["none", "client"]:
cdrom_spec.device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo()
elif cdrom_type == "iso":
cdrom_spec.device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(fileName=iso_path)
return cdrom_spec
@staticmethod
def is_equal_cdrom(vm_obj, cdrom_device, cdrom_type, iso_path):
if cdrom_type == "none":
return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo) and
cdrom_device.connectable.allowGuestControl and
not cdrom_device.connectable.startConnected and
(vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or not cdrom_device.connectable.connected))
elif cdrom_type == "client":
return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo) and
cdrom_device.connectable.allowGuestControl and
cdrom_device.connectable.startConnected and
(vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or cdrom_device.connectable.connected))
elif cdrom_type == "iso":
return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.IsoBackingInfo) and
cdrom_device.backing.fileName == iso_path and
cdrom_device.connectable.allowGuestControl and
cdrom_device.connectable.startConnected and
(vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or cdrom_device.connectable.connected))
@staticmethod
def update_cdrom_config(vm_obj, cdrom_spec, cdrom_device, iso_path=None):
# Updating an existing CD-ROM
if cdrom_spec["type"] in ["client", "none"]:
cdrom_device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo()
elif cdrom_spec["type"] == "iso" and iso_path is not None:
cdrom_device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(fileName=iso_path)
cdrom_device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
cdrom_device.connectable.allowGuestControl = True
cdrom_device.connectable.startConnected = (cdrom_spec["type"] != "none")
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
cdrom_device.connectable.connected = (cdrom_spec["type"] != "none")
def remove_cdrom(self, cdrom_device):
cdrom_spec = vim.vm.device.VirtualDeviceSpec()
cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
cdrom_spec.device = cdrom_device
return cdrom_spec
def create_scsi_disk(self, scsi_ctl, disk_index=None):
diskspec = vim.vm.device.VirtualDeviceSpec()
diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
diskspec.device = vim.vm.device.VirtualDisk()
diskspec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
diskspec.device.controllerKey = scsi_ctl.device.key
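# SCSI unit number 7 is reserved for the controller itself, so neither the
# running unit counter nor an explicitly requested disk_index may equal 7.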
if self.next_disk_unit_number == 7:
raise AssertionError()
if disk_index == 7:
raise AssertionError()
"""
Configure disk unit number.
"""
if disk_index is not None:
diskspec.device.unitNumber = disk_index
self.next_disk_unit_number = disk_index + 1
else:
diskspec.device.unitNumber = self.next_disk_unit_number
self.next_disk_unit_number += 1
# unit number 7 is reserved to SCSI controller, increase next index
if self.next_disk_unit_number == 7:
self.next_disk_unit_number += 1
return diskspec
def get_device(self, device_type, name):
nic_dict = dict(pcnet32=vim.vm.device.VirtualPCNet32(),
vmxnet2=vim.vm.device.VirtualVmxnet2(),
vmxnet3=vim.vm.device.VirtualVmxnet3(),
e1000=vim.vm.device.VirtualE1000(),
e1000e=vim.vm.device.VirtualE1000e(),
sriov=vim.vm.device.VirtualSriovEthernetCard(),
)
if device_type in nic_dict:
return nic_dict[device_type]
else:
self.module.fail_json(msg='Invalid device_type "%s"'
' for network "%s"' % (device_type, name))
def create_nic(self, device_type, device_label, device_infos):
nic = vim.vm.device.VirtualDeviceSpec()
nic.device = self.get_device(device_type, device_infos['name'])
nic.device.wakeOnLanEnabled = bool(device_infos.get('wake_on_lan', True))
nic.device.deviceInfo = vim.Description()
nic.device.deviceInfo.label = device_label
nic.device.deviceInfo.summary = device_infos['name']
nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
nic.device.connectable.startConnected = bool(device_infos.get('start_connected', True))
nic.device.connectable.allowGuestControl = bool(device_infos.get('allow_guest_control', True))
nic.device.connectable.connected = True
if 'mac' in device_infos and is_mac(device_infos['mac']):
nic.device.addressType = 'manual'
nic.device.macAddress = device_infos['mac']
else:
nic.device.addressType = 'generated'
return nic
def integer_value(self, input_value, name):
"""
Function to return int value for given input, else return error
Args:
input_value: Input value to retrieve int value from
name: Name of the Input value (used to build error message)
Returns: (int) if integer value can be obtained, otherwise fails the module with an error message.
"""
if isinstance(input_value, int):
return input_value
elif isinstance(input_value, str) and input_value.isdigit():
return int(input_value)
else:
self.module.fail_json(msg='"%s" attribute should be an'
' integer value.' % name)
class PyVmomiCache(object):
""" This class caches references to objects which are requested multiples times but not modified """
def __init__(self, content, dc_name=None):
self.content = content
self.dc_name = dc_name
self.networks = {}
self.clusters = {}
self.esx_hosts = {}
self.parent_datacenters = {}
def find_obj(self, content, types, name, confine_to_datacenter=True):
""" Wrapper around find_obj to set datacenter context """
result = find_obj(content, types, name)
if result and confine_to_datacenter:
if to_text(self.get_parent_datacenter(result).name) != to_text(self.dc_name):
result = None
objects = self.get_all_objs(content, types, confine_to_datacenter=True)
for obj in objects:
if name is None or to_text(obj.name) == to_text(name):
return obj
return result
def get_all_objs(self, content, types, confine_to_datacenter=True):
""" Wrapper around get_all_objs to set datacenter context """
objects = get_all_objs(content, types)
if confine_to_datacenter:
if hasattr(objects, 'items'):
# resource pools come back as a dictionary
# make a copy
for k, v in tuple(objects.items()):
parent_dc = self.get_parent_datacenter(k)
if parent_dc.name != self.dc_name:
del objects[k]
else:
# everything else should be a list
objects = [x for x in objects if self.get_parent_datacenter(x).name == self.dc_name]
return objects
def get_network(self, network):
if network not in self.networks:
self.networks[network] = self.find_obj(self.content, [vim.Network], network)
return self.networks[network]
def get_cluster(self, cluster):
if cluster not in self.clusters:
self.clusters[cluster] = self.find_obj(self.content, [vim.ClusterComputeResource], cluster)
return self.clusters[cluster]
def get_esx_host(self, host):
if host not in self.esx_hosts:
self.esx_hosts[host] = self.find_obj(self.content, [vim.HostSystem], host)
return self.esx_hosts[host]
def get_parent_datacenter(self, obj):
""" Walk the parent tree to find the objects datacenter """
if isinstance(obj, vim.Datacenter):
return obj
if obj in self.parent_datacenters:
return self.parent_datacenters[obj]
datacenter = None
while True:
if not hasattr(obj, 'parent'):
break
obj = obj.parent
if isinstance(obj, vim.Datacenter):
datacenter = obj
break
self.parent_datacenters[obj] = datacenter
return datacenter
class PyVmomiHelper(PyVmomi):
def __init__(self, module):
super(PyVmomiHelper, self).__init__(module)
self.device_helper = PyVmomiDeviceHelper(self.module)
self.configspec = None
self.relospec = None
self.change_detected = False # a change was detected and needs to be applied through reconfiguration
self.change_applied = False # a change was applied meaning at least one task succeeded
self.customspec = None
self.cache = PyVmomiCache(self.content, dc_name=self.params['datacenter'])
def gather_facts(self, vm):
return gather_vm_facts(self.content, vm)
def remove_vm(self, vm):
# https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.ManagedEntity.html#destroy
if vm.summary.runtime.powerState.lower() == 'poweredon':
self.module.fail_json(msg="Virtual machine %s found in 'powered on' state, "
"please use 'force' parameter to remove or poweroff VM "
"and try removing VM again." % vm.name)
task = vm.Destroy()
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'destroy'}
else:
return {'changed': self.change_applied, 'failed': False}
def configure_guestid(self, vm_obj, vm_creation=False):
# guest_id is not required when using templates
if self.params['template']:
return
# guest_id is only mandatory on VM creation
if vm_creation and self.params['guest_id'] is None:
self.module.fail_json(msg="guest_id attribute is mandatory for VM creation")
if self.params['guest_id'] and \
(vm_obj is None or self.params['guest_id'].lower() != vm_obj.summary.config.guestId.lower()):
self.change_detected = True
self.configspec.guestId = self.params['guest_id']
def configure_resource_alloc_info(self, vm_obj):
"""
Function to configure resource allocation information about virtual machine
:param vm_obj: VM object in case of reconfigure, None in case of deploy
:return: None
"""
rai_change_detected = False
memory_allocation = vim.ResourceAllocationInfo()
cpu_allocation = vim.ResourceAllocationInfo()
if 'hardware' in self.params:
if 'mem_limit' in self.params['hardware']:
mem_limit = None
try:
mem_limit = int(self.params['hardware'].get('mem_limit'))
except ValueError:
self.module.fail_json(msg="hardware.mem_limit attribute should be an integer value.")
memory_allocation.limit = mem_limit
if vm_obj is None or memory_allocation.limit != vm_obj.config.memoryAllocation.limit:
rai_change_detected = True
if 'mem_reservation' in self.params['hardware'] or 'memory_reservation' in self.params['hardware']:
mem_reservation = self.params['hardware'].get('mem_reservation')
if mem_reservation is None:
mem_reservation = self.params['hardware'].get('memory_reservation')
try:
mem_reservation = int(mem_reservation)
except ValueError:
self.module.fail_json(msg="hardware.mem_reservation or hardware.memory_reservation should be an integer value.")
memory_allocation.reservation = mem_reservation
if vm_obj is None or \
memory_allocation.reservation != vm_obj.config.memoryAllocation.reservation:
rai_change_detected = True
if 'cpu_limit' in self.params['hardware']:
cpu_limit = None
try:
cpu_limit = int(self.params['hardware'].get('cpu_limit'))
except ValueError:
self.module.fail_json(msg="hardware.cpu_limit attribute should be an integer value.")
cpu_allocation.limit = cpu_limit
if vm_obj is None or cpu_allocation.limit != vm_obj.config.cpuAllocation.limit:
rai_change_detected = True
if 'cpu_reservation' in self.params['hardware']:
cpu_reservation = None
try:
cpu_reservation = int(self.params['hardware'].get('cpu_reservation'))
except ValueError:
self.module.fail_json(msg="hardware.cpu_reservation should be an integer value.")
cpu_allocation.reservation = cpu_reservation
if vm_obj is None or \
cpu_allocation.reservation != vm_obj.config.cpuAllocation.reservation:
rai_change_detected = True
if rai_change_detected:
self.configspec.memoryAllocation = memory_allocation
self.configspec.cpuAllocation = cpu_allocation
self.change_detected = True
def configure_cpu_and_memory(self, vm_obj, vm_creation=False):
# set cpu/memory/etc
if 'hardware' in self.params:
if 'num_cpus' in self.params['hardware']:
try:
num_cpus = int(self.params['hardware']['num_cpus'])
except ValueError:
self.module.fail_json(msg="hardware.num_cpus attribute should be an integer value.")
# check VM power state and CPU hot-add/hot-remove state before reconfiguring the VM
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
if not vm_obj.config.cpuHotRemoveEnabled and num_cpus < vm_obj.config.hardware.numCPU:
self.module.fail_json(msg="Configured cpu number is less than the cpu number of the VM, "
"cpuHotRemove is not enabled")
if not vm_obj.config.cpuHotAddEnabled and num_cpus > vm_obj.config.hardware.numCPU:
self.module.fail_json(msg="Configured cpu number is more than the cpu number of the VM, "
"cpuHotAdd is not enabled")
if 'num_cpu_cores_per_socket' in self.params['hardware']:
try:
num_cpu_cores_per_socket = int(self.params['hardware']['num_cpu_cores_per_socket'])
except ValueError:
self.module.fail_json(msg="hardware.num_cpu_cores_per_socket attribute "
"should be an integer value.")
if num_cpus % num_cpu_cores_per_socket != 0:
self.module.fail_json(msg="hardware.num_cpus attribute should be a multiple "
"of hardware.num_cpu_cores_per_socket")
self.configspec.numCoresPerSocket = num_cpu_cores_per_socket
if vm_obj is None or self.configspec.numCoresPerSocket != vm_obj.config.hardware.numCoresPerSocket:
self.change_detected = True
self.configspec.numCPUs = num_cpus
if vm_obj is None or self.configspec.numCPUs != vm_obj.config.hardware.numCPU:
self.change_detected = True
# num_cpu is mandatory for VM creation
elif vm_creation and not self.params['template']:
self.module.fail_json(msg="hardware.num_cpus attribute is mandatory for VM creation")
if 'memory_mb' in self.params['hardware']:
try:
memory_mb = int(self.params['hardware']['memory_mb'])
except ValueError:
self.module.fail_json(msg="Failed to parse hardware.memory_mb value."
" Please refer the documentation and provide"
" correct value.")
# check VM power state and memory hot-add state before reconfiguring the VM
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
if vm_obj.config.memoryHotAddEnabled and memory_mb < vm_obj.config.hardware.memoryMB:
self.module.fail_json(msg="Configured memory is less than memory size of the VM, "
"operation is not supported")
elif not vm_obj.config.memoryHotAddEnabled and memory_mb != vm_obj.config.hardware.memoryMB:
self.module.fail_json(msg="memoryHotAdd is not enabled")
self.configspec.memoryMB = memory_mb
if vm_obj is None or self.configspec.memoryMB != vm_obj.config.hardware.memoryMB:
self.change_detected = True
# memory_mb is mandatory for VM creation
elif vm_creation and not self.params['template']:
self.module.fail_json(msg="hardware.memory_mb attribute is mandatory for VM creation")
if 'hotadd_memory' in self.params['hardware']:
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \
vm_obj.config.memoryHotAddEnabled != bool(self.params['hardware']['hotadd_memory']):
self.module.fail_json(msg="Configure hotadd memory operation is not supported when VM is power on")
self.configspec.memoryHotAddEnabled = bool(self.params['hardware']['hotadd_memory'])
if vm_obj is None or self.configspec.memoryHotAddEnabled != vm_obj.config.memoryHotAddEnabled:
self.change_detected = True
if 'hotadd_cpu' in self.params['hardware']:
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \
vm_obj.config.cpuHotAddEnabled != bool(self.params['hardware']['hotadd_cpu']):
self.module.fail_json(msg="Configure hotadd cpu operation is not supported when VM is power on")
self.configspec.cpuHotAddEnabled = bool(self.params['hardware']['hotadd_cpu'])
if vm_obj is None or self.configspec.cpuHotAddEnabled != vm_obj.config.cpuHotAddEnabled:
self.change_detected = True
if 'hotremove_cpu' in self.params['hardware']:
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \
vm_obj.config.cpuHotRemoveEnabled != bool(self.params['hardware']['hotremove_cpu']):
self.module.fail_json(msg="Configure hotremove cpu operation is not supported when VM is power on")
self.configspec.cpuHotRemoveEnabled = bool(self.params['hardware']['hotremove_cpu'])
if vm_obj is None or self.configspec.cpuHotRemoveEnabled != vm_obj.config.cpuHotRemoveEnabled:
self.change_detected = True
if 'memory_reservation_lock' in self.params['hardware']:
self.configspec.memoryReservationLockedToMax = bool(self.params['hardware']['memory_reservation_lock'])
if vm_obj is None or self.configspec.memoryReservationLockedToMax != vm_obj.config.memoryReservationLockedToMax:
self.change_detected = True
if 'boot_firmware' in self.params['hardware']:
# boot firmware re-config can cause boot issue
if vm_obj is not None:
return
boot_firmware = self.params['hardware']['boot_firmware'].lower()
if boot_firmware not in ('bios', 'efi'):
self.module.fail_json(msg="hardware.boot_firmware value is invalid [%s]."
" Need one of ['bios', 'efi']." % boot_firmware)
self.configspec.firmware = boot_firmware
self.change_detected = True
def sanitize_cdrom_params(self):
# cdroms {'ide': [{num: 0, cdrom: []}, {}], 'sata': [{num: 0, cdrom: []}, {}, ...]}
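# Illustrative grouping (made-up values): a single spec such as
#   {'controller_type': 'ide', 'controller_number': 0, 'unit_number': 0, 'type': 'iso',
#    'iso_path': '[datastore1] iso/example.iso', 'state': 'present'}
# ends up as {'ide': [{'num': 0, 'cdrom': [<that spec>]}], 'sata': []}.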
cdroms = {'ide': [], 'sata': []}
expected_cdrom_spec = self.params.get('cdrom')
if expected_cdrom_spec:
for cdrom_spec in expected_cdrom_spec:
cdrom_spec['controller_type'] = cdrom_spec.get('controller_type', 'ide').lower()
if cdrom_spec['controller_type'] not in ['ide', 'sata']:
self.module.fail_json(msg="Invalid cdrom.controller_type: %s, valid value is 'ide' or 'sata'."
% cdrom_spec['controller_type'])
cdrom_spec['state'] = cdrom_spec.get('state', 'present').lower()
if cdrom_spec['state'] not in ['present', 'absent']:
self.module.fail_json(msg="Invalid cdrom.state: %s, valid value is 'present', 'absent'."
% cdrom_spec['state'])
if cdrom_spec['state'] == 'present':
if 'type' in cdrom_spec and cdrom_spec.get('type') not in ['none', 'client', 'iso']:
self.module.fail_json(msg="Invalid cdrom.type: %s, valid value is 'none', 'client' or 'iso'."
% cdrom_spec.get('type'))
if cdrom_spec.get('type') == 'iso' and not cdrom_spec.get('iso_path'):
self.module.fail_json(msg="cdrom.iso_path is mandatory when cdrom.type is set to iso.")
if cdrom_spec['controller_type'] == 'ide' and \
(cdrom_spec.get('controller_number') not in [0, 1] or cdrom_spec.get('unit_number') not in [0, 1]):
self.module.fail_json(msg="Invalid cdrom.controller_number: %s or cdrom.unit_number: %s, valid"
" values are 0 or 1 for IDE controller." % (cdrom_spec.get('controller_number'), cdrom_spec.get('unit_number')))
if cdrom_spec['controller_type'] == 'sata' and \
(cdrom_spec.get('controller_number') not in range(0, 4) or cdrom_spec.get('unit_number') not in range(0, 30)):
self.module.fail_json(msg="Invalid cdrom.controller_number: %s or cdrom.unit_number: %s,"
" valid controller_number value is 0-3, valid unit_number is 0-29"
" for SATA controller." % (cdrom_spec.get('controller_number'), cdrom_spec.get('unit_number')))
ctl_exist = False
for exist_spec in cdroms.get(cdrom_spec['controller_type']):
if exist_spec['num'] == cdrom_spec['controller_number']:
ctl_exist = True
exist_spec['cdrom'].append(cdrom_spec)
break
if not ctl_exist:
cdroms.get(cdrom_spec['controller_type']).append({'num': cdrom_spec['controller_number'], 'cdrom': [cdrom_spec]})
return cdroms
def configure_cdrom(self, vm_obj):
# Configure the VM CD-ROM
if self.params.get('cdrom'):
if vm_obj and vm_obj.config.template:
# Changing CD-ROM settings on a template is not supported
return
if isinstance(self.params.get('cdrom'), dict):
self.configure_cdrom_dict(vm_obj)
elif isinstance(self.params.get('cdrom'), list):
self.configure_cdrom_list(vm_obj)
def configure_cdrom_dict(self, vm_obj):
if self.params["cdrom"].get('type') not in ['none', 'client', 'iso']:
self.module.fail_json(msg="cdrom.type is mandatory. Options are 'none', 'client', and 'iso'.")
if self.params["cdrom"]['type'] == 'iso' and not self.params["cdrom"].get('iso_path'):
self.module.fail_json(msg="cdrom.iso_path is mandatory when cdrom.type is set to iso.")
cdrom_spec = None
cdrom_devices = self.get_vm_cdrom_devices(vm=vm_obj)
iso_path = self.params["cdrom"].get("iso_path")
if len(cdrom_devices) == 0:
# Creating new CD-ROM
ide_devices = self.get_vm_ide_devices(vm=vm_obj)
if len(ide_devices) == 0:
# Creating new IDE device
ide_ctl = self.device_helper.create_ide_controller()
ide_device = ide_ctl.device
self.change_detected = True
self.configspec.deviceChange.append(ide_ctl)
else:
ide_device = ide_devices[0]
if len(ide_device.device) > 3:
self.module.fail_json(msg="hardware.cdrom specified for a VM or template which already has 4"
" IDE devices of which none are a cdrom")
cdrom_spec = self.device_helper.create_cdrom(ide_device=ide_device, cdrom_type=self.params["cdrom"]["type"],
iso_path=iso_path)
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
cdrom_spec.device.connectable.connected = (self.params["cdrom"]["type"] != "none")
elif not self.device_helper.is_equal_cdrom(vm_obj=vm_obj, cdrom_device=cdrom_devices[0],
cdrom_type=self.params["cdrom"]["type"], iso_path=iso_path):
self.device_helper.update_cdrom_config(vm_obj, self.params["cdrom"], cdrom_devices[0], iso_path=iso_path)
cdrom_spec = vim.vm.device.VirtualDeviceSpec()
cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
cdrom_spec.device = cdrom_devices[0]
if cdrom_spec:
self.change_detected = True
self.configspec.deviceChange.append(cdrom_spec)
def configure_cdrom_list(self, vm_obj):
configured_cdroms = self.sanitize_cdrom_params()
cdrom_devices = self.get_vm_cdrom_devices(vm=vm_obj)
# configure IDE CD-ROMs
if configured_cdroms['ide']:
ide_devices = self.get_vm_ide_devices(vm=vm_obj)
for expected_cdrom_spec in configured_cdroms['ide']:
ide_device = None
for device in ide_devices:
if device.busNumber == expected_cdrom_spec['num']:
ide_device = device
break
# if no matching IDE controller is found, or no IDE controller exists yet, create one
if not ide_device:
ide_ctl = self.device_helper.create_ide_controller(bus_number=expected_cdrom_spec['num'])
ide_device = ide_ctl.device
self.change_detected = True
self.configspec.deviceChange.append(ide_ctl)
for cdrom in expected_cdrom_spec['cdrom']:
cdrom_device = None
iso_path = cdrom.get('iso_path')
unit_number = cdrom.get('unit_number')
for target_cdrom in cdrom_devices:
if target_cdrom.controllerKey == ide_device.key and target_cdrom.unitNumber == unit_number:
cdrom_device = target_cdrom
break
# create new CD-ROM
if not cdrom_device and cdrom.get('state') != 'absent':
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
self.module.fail_json(msg='A CD-ROM attached to an IDE controller does not support hot-add.')
if len(ide_device.device) == 2:
self.module.fail_json(msg='Maximum number of CD-ROMs attached to IDE controller is 2.')
cdrom_spec = self.device_helper.create_cdrom(ide_device=ide_device, cdrom_type=cdrom['type'],
iso_path=iso_path, unit_number=unit_number)
self.change_detected = True
self.configspec.deviceChange.append(cdrom_spec)
# re-configure CD-ROM
elif cdrom_device and cdrom.get('state') != 'absent' and \
not self.device_helper.is_equal_cdrom(vm_obj=vm_obj, cdrom_device=cdrom_device,
cdrom_type=cdrom['type'], iso_path=iso_path):
self.device_helper.update_cdrom_config(vm_obj, cdrom, cdrom_device, iso_path=iso_path)
cdrom_spec = vim.vm.device.VirtualDeviceSpec()
cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
cdrom_spec.device = cdrom_device
self.change_detected = True
self.configspec.deviceChange.append(cdrom_spec)
# delete CD-ROM
elif cdrom_device and cdrom.get('state') == 'absent':
if vm_obj and vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOff:
self.module.fail_json(msg='A CD-ROM attached to an IDE controller does not support hot-remove.')
cdrom_spec = self.device_helper.remove_cdrom(cdrom_device)
self.change_detected = True
self.configspec.deviceChange.append(cdrom_spec)
# configure SATA CD-ROMs is not supported yet
if configured_cdroms['sata']:
pass
def configure_hardware_params(self, vm_obj):
"""
Function to configure hardware related configuration of virtual machine
Args:
vm_obj: virtual machine object
"""
if 'hardware' in self.params:
if 'max_connections' in self.params['hardware']:
# maxMksConnections == max_connections
self.configspec.maxMksConnections = int(self.params['hardware']['max_connections'])
if vm_obj is None or self.configspec.maxMksConnections != vm_obj.config.maxMksConnections:
self.change_detected = True
if 'nested_virt' in self.params['hardware']:
self.configspec.nestedHVEnabled = bool(self.params['hardware']['nested_virt'])
if vm_obj is None or self.configspec.nestedHVEnabled != bool(vm_obj.config.nestedHVEnabled):
self.change_detected = True
if 'version' in self.params['hardware']:
hw_version_check_failed = False
temp_version = self.params['hardware'].get('version', 10)
try:
temp_version = int(temp_version)
except ValueError:
hw_version_check_failed = True
if temp_version not in range(3, 15):
hw_version_check_failed = True
if hw_version_check_failed:
self.module.fail_json(msg="Failed to set hardware.version '%s' value as valid"
" values range from 3 (ESX 2.x) to 14 (ESXi 6.5 and greater)." % temp_version)
# Hardware version is denoted as "vmx-10"
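# e.g. "vmx-%02d" % 9 gives "vmx-09" and "vmx-%02d" % 14 gives "vmx-14"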
version = "vmx-%02d" % temp_version
self.configspec.version = version
if vm_obj is None or self.configspec.version != vm_obj.config.version:
self.change_detected = True
if vm_obj is not None:
# VM exists and we need to update the hardware version
current_version = vm_obj.config.version
# current_version = "vmx-10"
version_digit = int(current_version.split("-", 1)[-1])
if temp_version < version_digit:
self.module.fail_json(msg="Current hardware version '%d' which is greater than the specified"
" version '%d'. Downgrading hardware version is"
" not supported. Please specify version greater"
" than the current version." % (version_digit,
temp_version))
new_version = "vmx-%02d" % temp_version
try:
task = vm_obj.UpgradeVM_Task(new_version)
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'upgrade'}
except vim.fault.AlreadyUpgraded:
# Don't fail if VM is already upgraded.
pass
if 'virt_based_security' in self.params['hardware']:
host_version = self.select_host().summary.config.product.version
if int(host_version.split('.')[0]) < 6 or (int(host_version.split('.')[0]) == 6 and int(host_version.split('.')[1]) < 7):
self.module.fail_json(msg="ESXi version %s not support VBS." % host_version)
guest_ids = ['windows9_64Guest', 'windows9Server64Guest']
if vm_obj is None:
guestid = self.configspec.guestId
else:
guestid = vm_obj.summary.config.guestId
if guestid not in guest_ids:
self.module.fail_json(msg="Guest '%s' not support VBS." % guestid)
if (vm_obj is None and int(self.configspec.version.split('-')[1]) >= 14) or \
(vm_obj and int(vm_obj.config.version.split('-')[1]) >= 14 and (vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOff)):
self.configspec.flags = vim.vm.FlagInfo()
self.configspec.flags.vbsEnabled = bool(self.params['hardware']['virt_based_security'])
if bool(self.params['hardware']['virt_based_security']):
self.configspec.flags.vvtdEnabled = True
self.configspec.nestedHVEnabled = True
if (vm_obj is None and self.configspec.firmware == 'efi') or \
(vm_obj and vm_obj.config.firmware == 'efi'):
self.configspec.bootOptions = vim.vm.BootOptions()
self.configspec.bootOptions.efiSecureBootEnabled = True
else:
self.module.fail_json(msg="Not support VBS when firmware is BIOS.")
if vm_obj is None or self.configspec.flags.vbsEnabled != vm_obj.config.flags.vbsEnabled:
self.change_detected = True
def get_device_by_type(self, vm=None, type=None):
device_list = []
if vm is None or type is None:
return device_list
for device in vm.config.hardware.device:
if isinstance(device, type):
device_list.append(device)
return device_list
def get_vm_cdrom_devices(self, vm=None):
return self.get_device_by_type(vm=vm, type=vim.vm.device.VirtualCdrom)
def get_vm_ide_devices(self, vm=None):
return self.get_device_by_type(vm=vm, type=vim.vm.device.VirtualIDEController)
def get_vm_network_interfaces(self, vm=None):
device_list = []
if vm is None:
return device_list
nw_device_types = (vim.vm.device.VirtualPCNet32, vim.vm.device.VirtualVmxnet2,
vim.vm.device.VirtualVmxnet3, vim.vm.device.VirtualE1000,
vim.vm.device.VirtualE1000e, vim.vm.device.VirtualSriovEthernetCard)
for device in vm.config.hardware.device:
if isinstance(device, nw_device_types):
device_list.append(device)
return device_list
def sanitize_network_params(self):
"""
Sanitize user-provided network params
Returns: A sanitized list of network params, else fails
"""
network_devices = list()
# Clean up user data here
for network in self.params['networks']:
if 'name' not in network and 'vlan' not in network:
self.module.fail_json(msg="Please specify at least a network name or"
" a VLAN name under VM network list.")
if 'name' in network and self.cache.get_network(network['name']) is None:
self.module.fail_json(msg="Network '%(name)s' does not exist." % network)
elif 'vlan' in network:
dvps = self.cache.get_all_objs(self.content, [vim.dvs.DistributedVirtualPortgroup])
for dvp in dvps:
if hasattr(dvp.config.defaultPortConfig, 'vlan') and \
isinstance(dvp.config.defaultPortConfig.vlan.vlanId, int) and \
str(dvp.config.defaultPortConfig.vlan.vlanId) == str(network['vlan']):
network['name'] = dvp.config.name
break
if 'dvswitch_name' in network and \
dvp.config.distributedVirtualSwitch.name == network['dvswitch_name'] and \
dvp.config.name == network['vlan']:
network['name'] = dvp.config.name
break
if dvp.config.name == network['vlan']:
network['name'] = dvp.config.name
break
else:
self.module.fail_json(msg="VLAN '%(vlan)s' does not exist." % network)
if 'type' in network:
if network['type'] not in ['dhcp', 'static']:
self.module.fail_json(msg="Network type '%(type)s' is not a valid parameter."
" Valid parameters are ['dhcp', 'static']." % network)
if network['type'] != 'static' and ('ip' in network or 'netmask' in network):
self.module.fail_json(msg='Static IP information provided for network "%(name)s",'
' but "type" is set to "%(type)s".' % network)
else:
# Type is optional parameter, if user provided IP or Subnet assume
# network type as 'static'
if 'ip' in network or 'netmask' in network:
network['type'] = 'static'
else:
# User wants network type as 'dhcp'
network['type'] = 'dhcp'
if network.get('type') == 'static':
if 'ip' in network and 'netmask' not in network:
self.module.fail_json(msg="'netmask' is required if 'ip' is"
" specified under VM network list.")
if 'ip' not in network and 'netmask' in network:
self.module.fail_json(msg="'ip' is required if 'netmask' is"
" specified under VM network list.")
validate_device_types = ['pcnet32', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e', 'sriov']
if 'device_type' in network and network['device_type'] not in validate_device_types:
self.module.fail_json(msg="Device type specified '%s' is not valid."
" Please specify correct device"
" type from ['%s']." % (network['device_type'],
"', '".join(validate_device_types)))
if 'mac' in network and not is_mac(network['mac']):
self.module.fail_json(msg="Device MAC address '%s' is invalid."
" Please provide correct MAC address." % network['mac'])
network_devices.append(network)
return network_devices
def configure_network(self, vm_obj):
# Ignore an empty network list; this keeps existing networks when deploying a template or cloning a VM
if len(self.params['networks']) == 0:
return
network_devices = self.sanitize_network_params()
# List current device for Clone or Idempotency
current_net_devices = self.get_vm_network_interfaces(vm=vm_obj)
if len(network_devices) < len(current_net_devices):
self.module.fail_json(msg="Given network device list is lesser than current VM device list (%d < %d). "
"Removing interfaces is not allowed"
% (len(network_devices), len(current_net_devices)))
for key in range(0, len(network_devices)):
nic_change_detected = False
network_name = network_devices[key]['name']
if key < len(current_net_devices) and (vm_obj or self.params['template']):
# We are editing existing network devices; this happens either when
# cloning from a VM or from a template
nic = vim.vm.device.VirtualDeviceSpec()
nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
nic.device = current_net_devices[key]
if ('wake_on_lan' in network_devices[key] and
nic.device.wakeOnLanEnabled != network_devices[key].get('wake_on_lan')):
nic.device.wakeOnLanEnabled = network_devices[key].get('wake_on_lan')
nic_change_detected = True
if ('start_connected' in network_devices[key] and
nic.device.connectable.startConnected != network_devices[key].get('start_connected')):
nic.device.connectable.startConnected = network_devices[key].get('start_connected')
nic_change_detected = True
if ('allow_guest_control' in network_devices[key] and
nic.device.connectable.allowGuestControl != network_devices[key].get('allow_guest_control')):
nic.device.connectable.allowGuestControl = network_devices[key].get('allow_guest_control')
nic_change_detected = True
if nic.device.deviceInfo.summary != network_name:
nic.device.deviceInfo.summary = network_name
nic_change_detected = True
if 'device_type' in network_devices[key]:
device = self.device_helper.get_device(network_devices[key]['device_type'], network_name)
device_class = type(device)
if not isinstance(nic.device, device_class):
self.module.fail_json(msg="Changing the device type is not possible when interface is already present. "
"The failing device type is %s" % network_devices[key]['device_type'])
# Changing mac address has no effect when editing interface
if 'mac' in network_devices[key] and nic.device.macAddress != current_net_devices[key].macAddress:
self.module.fail_json(msg="Changing MAC address has not effect when interface is already present. "
"The failing new MAC address is %s" % nic.device.macAddress)
else:
# Default device type is vmxnet3, VMware best practice
device_type = network_devices[key].get('device_type', 'vmxnet3')
nic = self.device_helper.create_nic(device_type,
'Network Adapter %s' % (key + 1),
network_devices[key])
nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
nic_change_detected = True
if hasattr(self.cache.get_network(network_name), 'portKeys'):
# VDS switch
pg_obj = None
if 'dvswitch_name' in network_devices[key]:
dvs_name = network_devices[key]['dvswitch_name']
dvs_obj = find_dvs_by_name(self.content, dvs_name)
if dvs_obj is None:
self.module.fail_json(msg="Unable to find distributed virtual switch %s" % dvs_name)
pg_obj = find_dvspg_by_name(dvs_obj, network_name)
if pg_obj is None:
self.module.fail_json(msg="Unable to find distributed port group %s" % network_name)
else:
pg_obj = self.cache.find_obj(self.content, [vim.dvs.DistributedVirtualPortgroup], network_name)
# TODO: (akasurde) There is no way to find association between resource pool and distributed virtual portgroup
# For now, check if we are able to find distributed virtual switch
if not pg_obj.config.distributedVirtualSwitch:
self.module.fail_json(msg="Failed to find distributed virtual switch which is associated with"
" distributed virtual portgroup '%s'. Make sure hostsystem is associated with"
" the given distributed virtual portgroup. Also, check if user has correct"
" permission to access distributed virtual switch in the given portgroup." % pg_obj.name)
if (nic.device.backing and
(not hasattr(nic.device.backing, 'port') or
(nic.device.backing.port.portgroupKey != pg_obj.key or
nic.device.backing.port.switchUuid != pg_obj.config.distributedVirtualSwitch.uuid))):
nic_change_detected = True
dvs_port_connection = vim.dvs.PortConnection()
dvs_port_connection.portgroupKey = pg_obj.key
# If the user specifies a distributed port group that is not associated with the host system on
# which the virtual machine is going to be deployed, we get an error. We can infer that there is
# no association between the given distributed port group and the host system.
host_system = self.params.get('esxi_hostname')
if host_system and host_system not in [host.config.host.name for host in pg_obj.config.distributedVirtualSwitch.config.host]:
self.module.fail_json(msg="It seems that host system '%s' is not associated with distributed"
" virtual portgroup '%s'. Please make sure host system is associated"
" with given distributed virtual portgroup" % (host_system, pg_obj.name))
dvs_port_connection.switchUuid = pg_obj.config.distributedVirtualSwitch.uuid
nic.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
nic.device.backing.port = dvs_port_connection
elif isinstance(self.cache.get_network(network_name), vim.OpaqueNetwork):
# NSX-T Logical Switch
nic.device.backing = vim.vm.device.VirtualEthernetCard.OpaqueNetworkBackingInfo()
network_id = self.cache.get_network(network_name).summary.opaqueNetworkId
nic.device.backing.opaqueNetworkType = 'nsx.LogicalSwitch'
nic.device.backing.opaqueNetworkId = network_id
nic.device.deviceInfo.summary = 'nsx.LogicalSwitch: %s' % network_id
nic_change_detected = True
else:
# vSwitch
if not isinstance(nic.device.backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo):
nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
nic_change_detected = True
net_obj = self.cache.get_network(network_name)
if nic.device.backing.network != net_obj:
nic.device.backing.network = net_obj
nic_change_detected = True
if nic.device.backing.deviceName != network_name:
nic.device.backing.deviceName = network_name
nic_change_detected = True
if nic_change_detected:
# Change to fix the issue found while configuring opaque network
# VMs cloned from a template with opaque network will get disconnected
# Replacing deprecated config parameter with relocation Spec
if isinstance(self.cache.get_network(network_name), vim.OpaqueNetwork):
self.relospec.deviceChange.append(nic)
else:
self.configspec.deviceChange.append(nic)
self.change_detected = True
def configure_vapp_properties(self, vm_obj):
if len(self.params['vapp_properties']) == 0:
return
for x in self.params['vapp_properties']:
if not x.get('id'):
self.module.fail_json(msg="id is required to set vApp property")
new_vmconfig_spec = vim.vApp.VmConfigSpec()
if vm_obj:
# VM exists
# This is primarily for vcsim/integration tests, unset vAppConfig was not seen on my deployments
orig_spec = vm_obj.config.vAppConfig if vm_obj.config.vAppConfig else new_vmconfig_spec
vapp_properties_current = dict((x.id, x) for x in orig_spec.property)
vapp_properties_to_change = dict((x['id'], x) for x in self.params['vapp_properties'])
# each property must have a unique key
# init key counter with max value + 1
all_keys = [x.key for x in orig_spec.property]
new_property_index = max(all_keys) + 1 if all_keys else 0
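# e.g. existing property keys [0, 1, 4] make the next new key 5; with no existing properties the first new key is 0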
for property_id, property_spec in vapp_properties_to_change.items():
is_property_changed = False
new_vapp_property_spec = vim.vApp.PropertySpec()
if property_id in vapp_properties_current:
if property_spec.get('operation') == 'remove':
new_vapp_property_spec.operation = 'remove'
new_vapp_property_spec.removeKey = vapp_properties_current[property_id].key
is_property_changed = True
else:
# this is 'edit' branch
new_vapp_property_spec.operation = 'edit'
new_vapp_property_spec.info = vapp_properties_current[property_id]
try:
for property_name, property_value in property_spec.items():
if property_name == 'operation':
# operation is not an info object property
# if set to anything other than 'remove' we don't fail
continue
# Updating attributes only if needed
if getattr(new_vapp_property_spec.info, property_name) != property_value:
setattr(new_vapp_property_spec.info, property_name, property_value)
is_property_changed = True
except Exception as e:
msg = "Failed to set vApp property field='%s' and value='%s'. Error: %s" % (property_name, property_value, to_text(e))
self.module.fail_json(msg=msg)
else:
if property_spec.get('operation') == 'remove':
# attempt to delete non-existent property
continue
# this is add new property branch
new_vapp_property_spec.operation = 'add'
property_info = vim.vApp.PropertyInfo()
property_info.classId = property_spec.get('classId')
property_info.instanceId = property_spec.get('instanceId')
property_info.id = property_spec.get('id')
property_info.category = property_spec.get('category')
property_info.label = property_spec.get('label')
property_info.type = property_spec.get('type', 'string')
property_info.userConfigurable = property_spec.get('userConfigurable', True)
property_info.defaultValue = property_spec.get('defaultValue')
property_info.value = property_spec.get('value', '')
property_info.description = property_spec.get('description')
new_vapp_property_spec.info = property_info
new_vapp_property_spec.info.key = new_property_index
new_property_index += 1
is_property_changed = True
if is_property_changed:
new_vmconfig_spec.property.append(new_vapp_property_spec)
else:
# New VM
all_keys = [x.key for x in new_vmconfig_spec.property]
new_property_index = max(all_keys) + 1 if all_keys else 0
vapp_properties_to_change = dict((x['id'], x) for x in self.params['vapp_properties'])
is_property_changed = False
for property_id, property_spec in vapp_properties_to_change.items():
new_vapp_property_spec = vim.vApp.PropertySpec()
# this is add new property branch
new_vapp_property_spec.operation = 'add'
property_info = vim.vApp.PropertyInfo()
property_info.classId = property_spec.get('classId')
property_info.instanceId = property_spec.get('instanceId')
property_info.id = property_spec.get('id')
property_info.category = property_spec.get('category')
property_info.label = property_spec.get('label')
property_info.type = property_spec.get('type', 'string')
property_info.userConfigurable = property_spec.get('userConfigurable', True)
property_info.defaultValue = property_spec.get('defaultValue')
property_info.value = property_spec.get('value', '')
property_info.description = property_spec.get('description')
new_vapp_property_spec.info = property_info
new_vapp_property_spec.info.key = new_property_index
new_property_index += 1
is_property_changed = True
if is_property_changed:
new_vmconfig_spec.property.append(new_vapp_property_spec)
if new_vmconfig_spec.property:
self.configspec.vAppConfig = new_vmconfig_spec
self.change_detected = True
def customize_customvalues(self, vm_obj, config_spec):
if len(self.params['customvalues']) == 0:
return
vm_custom_spec = config_spec
vm_custom_spec.extraConfig = []
changed = False
facts = self.gather_facts(vm_obj)
for kv in self.params['customvalues']:
if 'key' not in kv or 'value' not in kv:
self.module.exit_json(msg="customvalues items required both 'key' and 'value' fields.")
# Add the option only if the key is missing from the current custom values or its value differs
if kv['key'] not in facts['customvalues'] or facts['customvalues'][kv['key']] != kv['value']:
option = vim.option.OptionValue()
option.key = kv['key']
option.value = kv['value']
vm_custom_spec.extraConfig.append(option)
changed = True
if changed:
self.change_detected = True
def customize_vm(self, vm_obj):
# User specified customization specification
custom_spec_name = self.params.get('customization_spec')
if custom_spec_name:
cc_mgr = self.content.customizationSpecManager
if cc_mgr.DoesCustomizationSpecExist(name=custom_spec_name):
temp_spec = cc_mgr.GetCustomizationSpec(name=custom_spec_name)
self.customspec = temp_spec.spec
return
else:
self.module.fail_json(msg="Unable to find customization specification"
" '%s' in given configuration." % custom_spec_name)
# Network settings
adaptermaps = []
for network in self.params['networks']:
guest_map = vim.vm.customization.AdapterMapping()
guest_map.adapter = vim.vm.customization.IPSettings()
if 'ip' in network and 'netmask' in network:
guest_map.adapter.ip = vim.vm.customization.FixedIp()
guest_map.adapter.ip.ipAddress = str(network['ip'])
guest_map.adapter.subnetMask = str(network['netmask'])
elif 'type' in network and network['type'] == 'dhcp':
guest_map.adapter.ip = vim.vm.customization.DhcpIpGenerator()
if 'gateway' in network:
guest_map.adapter.gateway = network['gateway']
# On Windows, DNS domain and DNS servers can be set by network interface
# https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.IPSettings.html
if 'domain' in network:
guest_map.adapter.dnsDomain = network['domain']
elif 'domain' in self.params['customization']:
guest_map.adapter.dnsDomain = self.params['customization']['domain']
if 'dns_servers' in network:
guest_map.adapter.dnsServerList = network['dns_servers']
elif 'dns_servers' in self.params['customization']:
guest_map.adapter.dnsServerList = self.params['customization']['dns_servers']
adaptermaps.append(guest_map)
# Global DNS settings
globalip = vim.vm.customization.GlobalIPSettings()
if 'dns_servers' in self.params['customization']:
globalip.dnsServerList = self.params['customization']['dns_servers']
# TODO: Maybe list the different domains from the interfaces here by default ?
if 'dns_suffix' in self.params['customization']:
dns_suffix = self.params['customization']['dns_suffix']
if isinstance(dns_suffix, list):
globalip.dnsSuffixList = " ".join(dns_suffix)
else:
globalip.dnsSuffixList = dns_suffix
elif 'domain' in self.params['customization']:
globalip.dnsSuffixList = self.params['customization']['domain']
if self.params['guest_id']:
guest_id = self.params['guest_id']
else:
guest_id = vm_obj.summary.config.guestId
# For windows guest OS, use SysPrep
# https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.Sysprep.html#field_detail
if 'win' in guest_id:
ident = vim.vm.customization.Sysprep()
ident.userData = vim.vm.customization.UserData()
# Setting hostName, orgName and fullName is mandatory, so we set some default when missing
ident.userData.computerName = vim.vm.customization.FixedName()
# computer name will be truncated to 15 characters if using VM name
default_name = self.params['name'].replace(' ', '')
default_name = ''.join([c for c in default_name if c not in string.punctuation])
ident.userData.computerName.name = str(self.params['customization'].get('hostname', default_name[0:15]))
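# Illustrative derivation (hypothetical VM name): 'web server-01.example' becomes 'webserver01example'
# after stripping spaces and punctuation, and the default computer name is its first 15 characters,
# 'webserver01exam', unless customization.hostname is provided.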
ident.userData.fullName = str(self.params['customization'].get('fullname', 'Administrator'))
ident.userData.orgName = str(self.params['customization'].get('orgname', 'ACME'))
if 'productid' in self.params['customization']:
ident.userData.productId = str(self.params['customization']['productid'])
ident.guiUnattended = vim.vm.customization.GuiUnattended()
if 'autologon' in self.params['customization']:
ident.guiUnattended.autoLogon = self.params['customization']['autologon']
ident.guiUnattended.autoLogonCount = self.params['customization'].get('autologoncount', 1)
if 'timezone' in self.params['customization']:
# Check if timezone value is a int before proceeding.
ident.guiUnattended.timeZone = self.device_helper.integer_value(
self.params['customization']['timezone'],
'customization.timezone')
ident.identification = vim.vm.customization.Identification()
if self.params['customization'].get('password', '') != '':
ident.guiUnattended.password = vim.vm.customization.Password()
ident.guiUnattended.password.value = str(self.params['customization']['password'])
ident.guiUnattended.password.plainText = True
if 'joindomain' in self.params['customization']:
if 'domainadmin' not in self.params['customization'] or 'domainadminpassword' not in self.params['customization']:
self.module.fail_json(msg="'domainadmin' and 'domainadminpassword' entries are mandatory in 'customization' section to use "
"joindomain feature")
ident.identification.domainAdmin = str(self.params['customization']['domainadmin'])
ident.identification.joinDomain = str(self.params['customization']['joindomain'])
ident.identification.domainAdminPassword = vim.vm.customization.Password()
ident.identification.domainAdminPassword.value = str(self.params['customization']['domainadminpassword'])
ident.identification.domainAdminPassword.plainText = True
elif 'joinworkgroup' in self.params['customization']:
ident.identification.joinWorkgroup = str(self.params['customization']['joinworkgroup'])
if 'runonce' in self.params['customization']:
ident.guiRunOnce = vim.vm.customization.GuiRunOnce()
ident.guiRunOnce.commandList = self.params['customization']['runonce']
else:
# FIXME: We have no clue whether this non-Windows OS is actually Linux, hence it might fail!
# For Linux guest OS, use LinuxPrep
# https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.LinuxPrep.html
ident = vim.vm.customization.LinuxPrep()
# TODO: Maybe add domain from interface if missing ?
if 'domain' in self.params['customization']:
ident.domain = str(self.params['customization']['domain'])
ident.hostName = vim.vm.customization.FixedName()
hostname = str(self.params['customization'].get('hostname', self.params['name'].split('.')[0]))
# Remove all characters except alphanumeric and minus which is allowed by RFC 952
valid_hostname = re.sub(r"[^a-zA-Z0-9\-]", "", hostname)
ident.hostName.name = valid_hostname
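# Illustrative sanitization (hypothetical value): a hostname of 'db_server' becomes 'dbserver'
# because only alphanumeric characters and '-' are kept.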
# List of supported time zones for different vSphere versions in Linux/Unix systems
# https://kb.vmware.com/s/article/2145518
if 'timezone' in self.params['customization']:
ident.timeZone = str(self.params['customization']['timezone'])
if 'hwclockUTC' in self.params['customization']:
ident.hwClockUTC = self.params['customization']['hwclockUTC']
self.customspec = vim.vm.customization.Specification()
self.customspec.nicSettingMap = adaptermaps
self.customspec.globalIPSettings = globalip
self.customspec.identity = ident
def get_vm_scsi_controller(self, vm_obj):
# If vm_obj doesn't exist there is no SCSI controller to find
if vm_obj is None:
return None
for device in vm_obj.config.hardware.device:
if self.device_helper.is_scsi_controller(device):
scsi_ctl = vim.vm.device.VirtualDeviceSpec()
scsi_ctl.device = device
return scsi_ctl
return None
def get_configured_disk_size(self, expected_disk_spec):
# what size is it?
if [x for x in expected_disk_spec.keys() if x.startswith('size_') or x == 'size']:
# size, size_tb, size_gb, size_mb, size_kb
if 'size' in expected_disk_spec:
size_regex = re.compile(r'(\d+(?:\.\d+)?)([tgmkTGMK][bB])')
disk_size_m = size_regex.match(expected_disk_spec['size'])
try:
if disk_size_m:
expected = disk_size_m.group(1)
unit = disk_size_m.group(2)
else:
raise ValueError
if re.match(r'\d+\.\d+', expected):
# We found float value in string, let's typecast it
expected = float(expected)
else:
# We found int value in string, let's typecast it
expected = int(expected)
if not expected or not unit:
raise ValueError
except (TypeError, ValueError, NameError):
# Common failure
self.module.fail_json(msg="Failed to parse disk size please review value"
" provided using documentation.")
else:
param = [x for x in expected_disk_spec.keys() if x.startswith('size_')][0]
unit = param.split('_')[-1].lower()
expected = [x[1] for x in expected_disk_spec.items() if x[0].startswith('size_')][0]
expected = int(expected)
disk_units = dict(tb=3, gb=2, mb=1, kb=0)
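# The result is expressed in KiB: e.g. (illustrative) size: '10GB' yields expected=10 and unit='gb',
# so the method returns 10 * 1024 ** 2 = 10485760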
if unit in disk_units:
unit = unit.lower()
return expected * (1024 ** disk_units[unit])
else:
self.module.fail_json(msg="%s is not a supported unit for disk size."
" Supported units are ['%s']." % (unit,
"', '".join(disk_units.keys())))
# No size found but disk, fail
self.module.fail_json(
msg="No size, size_kb, size_mb, size_gb or size_tb attribute found into disk configuration")
def find_vmdk(self, vmdk_path):
"""
Takes a vsphere datastore path in the format
[datastore_name] path/to/file.vmdk
Returns vsphere file object or raises RuntimeError
"""
datastore_name, vmdk_fullpath, vmdk_filename, vmdk_folder = self.vmdk_disk_path_split(vmdk_path)
datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name)
if datastore is None:
self.module.fail_json(msg="Failed to find the datastore %s" % datastore_name)
return self.find_vmdk_file(datastore, vmdk_fullpath, vmdk_filename, vmdk_folder)
def add_existing_vmdk(self, vm_obj, expected_disk_spec, diskspec, scsi_ctl):
"""
Adds vmdk file described by expected_disk_spec['filename'], retrieves the file
information and adds the correct spec to self.configspec.deviceChange.
"""
filename = expected_disk_spec['filename']
# if this is a new disk, or the disk file names are different
if (vm_obj and diskspec.device.backing.fileName != filename) or vm_obj is None:
vmdk_file = self.find_vmdk(expected_disk_spec['filename'])
diskspec.device.backing.fileName = expected_disk_spec['filename']
diskspec.device.capacityInKB = VmomiSupport.vmodlTypes['long'](vmdk_file.fileSize / 1024)
diskspec.device.key = -1
self.change_detected = True
self.configspec.deviceChange.append(diskspec)
def configure_disks(self, vm_obj):
# Ignore an empty disk list; this keeps existing disks when deploying a template or cloning a VM
if len(self.params['disk']) == 0:
return
scsi_ctl = self.get_vm_scsi_controller(vm_obj)
# Create scsi controller only if we are deploying a new VM, not a template or reconfiguring
if vm_obj is None or scsi_ctl is None:
scsi_ctl = self.device_helper.create_scsi_controller(self.get_scsi_type())
self.change_detected = True
self.configspec.deviceChange.append(scsi_ctl)
disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)] \
if vm_obj is not None else None
if disks is not None and self.params.get('disk') and len(self.params.get('disk')) < len(disks):
self.module.fail_json(msg="Provided disks configuration has less disks than "
"the target object (%d vs %d)" % (len(self.params.get('disk')), len(disks)))
disk_index = 0
for expected_disk_spec in self.params.get('disk'):
disk_modified = False
# If we are manipulating an existing object which has disks and disk_index is within the existing disks
if vm_obj is not None and disks is not None and disk_index < len(disks):
diskspec = vim.vm.device.VirtualDeviceSpec()
# set the operation to edit so that it knows to keep other settings
diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
diskspec.device = disks[disk_index]
else:
diskspec = self.device_helper.create_scsi_disk(scsi_ctl, disk_index)
disk_modified = True
# increment index for next disk search
disk_index += 1
# index 7 is reserved for the SCSI controller
if disk_index == 7:
disk_index += 1
if 'disk_mode' in expected_disk_spec:
disk_mode = expected_disk_spec.get('disk_mode', 'persistent').lower()
valid_disk_mode = ['persistent', 'independent_persistent', 'independent_nonpersistent']
if disk_mode not in valid_disk_mode:
self.module.fail_json(msg="disk_mode specified is not valid."
" Should be one of ['%s']" % "', '".join(valid_disk_mode))
if (vm_obj and diskspec.device.backing.diskMode != disk_mode) or (vm_obj is None):
diskspec.device.backing.diskMode = disk_mode
disk_modified = True
else:
diskspec.device.backing.diskMode = "persistent"
# is it thin?
if 'type' in expected_disk_spec:
disk_type = expected_disk_spec.get('type', '').lower()
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
import time
from operator import itemgetter
from itertools import groupby
from osv import fields, osv
from tools.translate import _
import netsvc
import tools
from tools import float_compare
import decimal_precision as dp
import logging
#----------------------------------------------------------
# Incoterms
#----------------------------------------------------------
class stock_incoterms(osv.osv):
_name = "stock.incoterms"
_description = "Incoterms"
_columns = {
'name': fields.char('Name', size=64, required=True, help="Incoterms are series of sales terms.They are used to divide transaction costs and responsibilities between buyer and seller and reflect state-of-the-art transportation practices."),
'code': fields.char('Code', size=3, required=True, help="Code for Incoterms"),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide an INCOTERM without deleting it."),
}
_defaults = {
'active': True,
}
stock_incoterms()
class stock_journal(osv.osv):
_name = "stock.journal"
_description = "Stock Journal"
_columns = {
'name': fields.char('Stock Journal', size=32, required=True),
'user_id': fields.many2one('res.users', 'Responsible'),
}
_defaults = {
'user_id': lambda s, c, u, ctx: u
}
stock_journal()
#----------------------------------------------------------
# Stock Location
#----------------------------------------------------------
class stock_location(osv.osv):
_name = "stock.location"
_description = "Location"
_parent_name = "location_id"
_parent_store = True
_parent_order = 'posz,name'
_order = 'parent_left'
def name_get(self, cr, uid, ids, context=None):
# always return the full hierarchical name
res = self._complete_name(cr, uid, ids, 'complete_name', None, context=context)
return res.items()
def _complete_name(self, cr, uid, ids, name, args, context=None):
""" Forms complete name of location from parent location to child location.
@return: Dictionary of values
"""
res = {}
for m in self.browse(cr, uid, ids, context=context):
names = [m.name]
parent = m.location_id
while parent:
names.append(parent.name)
parent = parent.location_id
res[m.id] = ' / '.join(reversed(names))
return res
def _get_sublocations(self, cr, uid, ids, context=None):
""" return all sublocations of the given stock locations (included) """
return self.search(cr, uid, [('id', 'child_of', ids)], context=context)
def _product_value(self, cr, uid, ids, field_names, arg, context=None):
"""Computes stock value (real and virtual) for a product, as well as stock qty (real and virtual).
@param field_names: Name of field
@return: Dictionary of values
"""
prod_id = context and context.get('product_id', False)
if not prod_id:
return dict([(i, {}.fromkeys(field_names, 0.0)) for i in ids])
product_product_obj = self.pool.get('product.product')
cr.execute('select distinct product_id, location_id from stock_move where location_id in %s', (tuple(ids), ))
dict1 = cr.dictfetchall()
cr.execute('select distinct product_id, location_dest_id as location_id from stock_move where location_dest_id in %s', (tuple(ids), ))
dict2 = cr.dictfetchall()
res_products_by_location = sorted(dict1+dict2, key=itemgetter('location_id'))
products_by_location = dict((k, [v['product_id'] for v in itr]) for k, itr in groupby(res_products_by_location, itemgetter('location_id')))
result = dict([(i, {}.fromkeys(field_names, 0.0)) for i in ids])
result.update(dict([(i, {}.fromkeys(field_names, 0.0)) for i in list(set([aaa['location_id'] for aaa in res_products_by_location]))]))
currency_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.currency_id.id
currency_obj = self.pool.get('res.currency')
currency = currency_obj.browse(cr, uid, currency_id, context=context)
for loc_id, product_ids in products_by_location.items():
if prod_id:
product_ids = [prod_id]
c = (context or {}).copy()
c['location'] = loc_id
for prod in product_product_obj.browse(cr, uid, product_ids, context=c):
for f in field_names:
if f == 'stock_real':
if loc_id not in result:
result[loc_id] = {}
result[loc_id][f] += prod.qty_available
elif f == 'stock_virtual':
result[loc_id][f] += prod.virtual_available
elif f == 'stock_real_value':
amount = prod.qty_available * prod.standard_price
amount = currency_obj.round(cr, uid, currency, amount)
result[loc_id][f] += amount
elif f == 'stock_virtual_value':
amount = prod.virtual_available * prod.standard_price
amount = currency_obj.round(cr, uid, currency, amount)
result[loc_id][f] += amount
return result
_columns = {
'name': fields.char('Location Name', size=64, required=True, translate=True),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide a location without deleting it."),
'usage': fields.selection([('supplier', 'Supplier Location'), ('view', 'View'), ('internal', 'Internal Location'), ('customer', 'Customer Location'), ('inventory', 'Inventory'), ('procurement', 'Procurement'), ('production', 'Production'), ('transit', 'Transit Location for Inter-Companies Transfers')], 'Location Type', required=True,
help="""* Supplier Location: Virtual location representing the source location for products coming from your suppliers
\n* View: Virtual location used to create hierarchical structures for your warehouse, aggregating its child locations; can't directly contain products
\n* Internal Location: Physical locations inside your own warehouses,
\n* Customer Location: Virtual location representing the destination location for products sent to your customers
\n* Inventory: Virtual location serving as counterpart for inventory operations used to correct stock levels (Physical inventories)
\n* Procurement: Virtual location serving as temporary counterpart for procurement operations when the source (supplier or production) is not known yet. This location should be empty when the procurement scheduler has finished running.
\n* Production: Virtual counterpart location for production operations: this location consumes the raw material and produces finished products
""", select = True),
# temporarily removed, as it's unused: 'allocation_method': fields.selection([('fifo', 'FIFO'), ('lifo', 'LIFO'), ('nearest', 'Nearest')], 'Allocation Method', required=True),
'complete_name': fields.function(_complete_name, type='char', size=256, string="Location Name",
store={'stock.location': (_get_sublocations, ['name', 'location_id'], 10)}),
'stock_real': fields.function(_product_value, type='float', string='Real Stock', multi="stock"),
'stock_virtual': fields.function(_product_value, type='float', string='Virtual Stock', multi="stock"),
'location_id': fields.many2one('stock.location', 'Parent Location', select=True, ondelete='cascade'),
'child_ids': fields.one2many('stock.location', 'location_id', 'Contains'),
'chained_journal_id': fields.many2one('stock.journal', 'Chaining Journal',help="Inventory Journal in which the chained move will be written, if the Chaining Type is not Transparent (no journal is used if left empty)"),
'chained_location_id': fields.many2one('stock.location', 'Chained Location If Fixed'),
'chained_location_type': fields.selection([('none', 'None'), ('customer', 'Customer'), ('fixed', 'Fixed Location')],
'Chained Location Type', required=True,
help="Determines whether this location is chained to another location, i.e. any incoming product in this location \n" \
"should next go to the chained location. The chained location is determined according to the type :"\
"\n* None: No chaining at all"\
"\n* Customer: The chained location will be taken from the Customer Location field on the Partner form of the Partner that is specified in the Picking list of the incoming products." \
"\n* Fixed Location: The chained location is taken from the next field: Chained Location if Fixed." \
),
'chained_auto_packing': fields.selection(
[('auto', 'Automatic Move'), ('manual', 'Manual Operation'), ('transparent', 'Automatic No Step Added')],
'Chaining Type',
required=True,
help="This is used only if you select a chained location type.\n" \
"The 'Automatic Move' value will create a stock move after the current one that will be "\
"validated automatically. With 'Manual Operation', the stock move has to be validated "\
"by a worker. With 'Automatic No Step Added', the location is replaced in the original move."
),
'chained_picking_type': fields.selection([('out', 'Sending Goods'), ('in', 'Getting Goods'), ('internal', 'Internal')], 'Shipping Type', help="Shipping Type of the Picking List that will contain the chained move (leave empty to automatically detect the type based on the source and destination locations)."),
'chained_company_id': fields.many2one('res.company', 'Chained Company', help='The company the Picking List containing the chained move will belong to (leave empty to use the default company determination rules)'),
'chained_delay': fields.integer('Chaining Lead Time',help="Delay between original move and chained move in days"),
'address_id': fields.many2one('res.partner.address', 'Location Address',help="Address of customer or supplier."),
'icon': fields.selection(tools.icons, 'Icon', size=64, help="Icon shown in the hierarchical tree view"),
'comment': fields.text('Additional Information'),
'posx': fields.integer('Corridor (X)', help="Optional localization details, for information purposes only"),
'posy': fields.integer('Shelves (Y)', help="Optional localization details, for information purposes only"),
'posz': fields.integer('Height (Z)', help="Optional localization details, for information purposes only"),
'parent_left': fields.integer('Left Parent', select=1),
'parent_right': fields.integer('Right Parent', select=1),
'stock_real_value': fields.function(_product_value, type='float', string='Real Stock Value', multi="stock", digits_compute=dp.get_precision('Account')),
'stock_virtual_value': fields.function(_product_value, type='float', string='Virtual Stock Value', multi="stock", digits_compute=dp.get_precision('Account')),
'company_id': fields.many2one('res.company', 'Company', select=1, help='Leave this field empty if this location is shared between all companies'),
'scrap_location': fields.boolean('Scrap Location', help='Check this box to allow using this location to put scrapped/damaged goods.'),
'valuation_in_account_id': fields.many2one('account.account', 'Stock Valuation Account (Incoming)', domain = [('type','=','other')],
help="Used for real-time inventory valuation. When set on a virtual location (non internal type), "
"this account will be used to hold the value of products being moved from an internal location "
"into this location, instead of the generic Stock Output Account set on the product. "
"This has no effect for internal locations."),
'valuation_out_account_id': fields.many2one('account.account', 'Stock Valuation Account (Outgoing)', domain = [('type','=','other')],
help="Used for real-time inventory valuation. When set on a virtual location (non internal type), "
"this account will be used to hold the value of products being moved out of this location "
"and into an internal location, instead of the generic Stock Output Account set on the product. "
"This has no effect for internal locations."),
}
_defaults = {
'active': True,
'usage': 'internal',
'chained_location_type': 'none',
'chained_auto_packing': 'manual',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.location', context=c),
'posx': 0,
'posy': 0,
'posz': 0,
'icon': False,
'scrap_location': False,
}
def chained_location_get(self, cr, uid, location, partner=None, product=None, context=None):
""" Finds chained location
@param location: Location id
@param partner: Partner id
@param product: Product id
@return: List of values
"""
result = None
if location.chained_location_type == 'customer':
if partner:
result = partner.property_stock_customer
elif location.chained_location_type == 'fixed':
result = location.chained_location_id
if result:
return result, location.chained_auto_packing, location.chained_delay, location.chained_journal_id and location.chained_journal_id.id or False, location.chained_company_id and location.chained_company_id.id or False, location.chained_picking_type
return result
def picking_type_get(self, cr, uid, from_location, to_location, context=None):
""" Gets type of picking.
@param from_location: Source location
@param to_location: Destination location
@return: Location type
"""
result = 'internal'
if (from_location.usage=='internal') and (to_location and to_location.usage in ('customer', 'supplier')):
result = 'out'
elif (from_location.usage in ('supplier', 'customer')) and (to_location.usage == 'internal'):
result = 'in'
return result
def _product_get_all_report(self, cr, uid, ids, product_ids=False, context=None):
return self._product_get_report(cr, uid, ids, product_ids, context, recursive=True)
def _product_get_report(self, cr, uid, ids, product_ids=False,
context=None, recursive=False):
""" Finds the product quantity and price for particular location.
@param product_ids: Ids of product
@param recursive: True or False
@return: Dictionary of values
"""
if context is None:
context = {}
product_obj = self.pool.get('product.product')
# Take the user company and pricetype
context['currency_id'] = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id
# To be able to offer recursive or non-recursive reports we need to prevent recursive quantities by default
context['compute_child'] = False
if not product_ids:
product_ids = product_obj.search(cr, uid, [], context={'active_test': False})
products = product_obj.browse(cr, uid, product_ids, context=context)
products_by_uom = {}
products_by_id = {}
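# Group products by UoM so the quantity lookup below can be done once per UoM
# (with the right 'uom' key in the context), and keep an id -> product map for
# the price computation.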
for product in products:
products_by_uom.setdefault(product.uom_id.id, [])
products_by_uom[product.uom_id.id].append(product)
products_by_id.setdefault(product.id, [])
products_by_id[product.id] = product
result = {}
result['product'] = []
for id in ids:
quantity_total = 0.0
total_price = 0.0
for uom_id in products_by_uom.keys():
fnc = self._product_get
if recursive:
fnc = self._product_all_get
ctx = context.copy()
ctx['uom'] = uom_id
qty = fnc(cr, uid, id, [x.id for x in products_by_uom[uom_id]],
context=ctx)
for product_id in qty.keys():
if not qty[product_id]:
continue
product = products_by_id[product_id]
quantity_total += qty[product_id]
# Compute based on pricetype
# Choose the right field (standard_price) to read
amount_unit = product.price_get('standard_price', context=context)[product.id]
price = qty[product_id] * amount_unit
total_price += price
result['product'].append({
'price': amount_unit,
'prod_name': product.name,
'code': product.default_code, # used by lot_overview_all report!
'variants': product.variants or '',
'uom': product.uom_id.name,
'prod_qty': qty[product_id],
'price_value': price,
})
result['total'] = quantity_total
result['total_price'] = total_price
return result
def _product_get_multi_location(self, cr, uid, ids, product_ids=False, context=None,
states=['done'], what=('in', 'out')):
"""
@param product_ids: Ids of the products to compute quantities for
@param states: List of move states to take into account
@param what: Tuple of move directions to consider ('in', 'out')
@return: Dictionary of available quantities, keyed by product id
"""
product_obj = self.pool.get('product.product')
if context is None:
context = {}
context.update({
'states': states,
'what': what,
'location': ids
})
return product_obj.get_product_available(cr, uid, product_ids, context=context)
def _product_get(self, cr, uid, id, product_ids=False, context=None, states=['done']):
"""
@param product_ids: Ids of the products to compute quantities for
@param states: List of move states to take into account
@return: Dictionary of quantities, keyed by product id
"""
ids = id and [id] or []
return self._product_get_multi_location(cr, uid, ids, product_ids, context=context, states=states)
def _product_all_get(self, cr, uid, id, product_ids=False, context=None, states=['done']):
# build the list of ids of children of the location given by id
ids = id and [id] or []
location_ids = self.search(cr, uid, [('location_id', 'child_of', ids)])
return self._product_get_multi_location(cr, uid, location_ids, product_ids, context, states)
def _product_virtual_get(self, cr, uid, id, product_ids=False, context=None, states=['done']):
return self._product_all_get(cr, uid, id, product_ids, context, ['confirmed', 'waiting', 'assigned', 'done'])
def _product_reserve(self, cr, uid, ids, product_id, product_qty, context=None, lock=False):
"""
Attempt to find a quantity ``product_qty`` (in the product's default uom or the uom passed in ``context``) of product ``product_id``
in locations with id ``ids`` and their child locations. If ``lock`` is True, the stock.move lines
of product with id ``product_id`` in the searched location will be write-locked using Postgres's
"FOR UPDATE NOWAIT" option until the transaction is committed or rolled back, to prevent reservin
twice the same products.
If ``lock`` is True and the lock cannot be obtained (because another transaction has locked some of
the same stock.move lines), a log line will be output and False will be returned, as if there was
not enough stock.
:param product_id: Id of product to reserve
:param product_qty: Quantity of product to reserve (in the product's default uom or the uom passed in ``context``)
:param lock: if True, the stock.move lines of product with id ``product_id`` in all locations (and children locations) with ``ids`` will
be write-locked using postgres's "FOR UPDATE NOWAIT" option until the transaction is committed or rolled back. This is
to prevent reserving twice the same products.
:param context: optional context dictionary: if a 'uom' key is present it will be used instead of the default product uom to
compute the ``product_qty`` and in the return value.
:return: List of tuples in the form (qty, location_id) with the (partial) quantities that can be taken in each location to
reach the requested product_qty (``qty`` is expressed in the default uom of the product), or False if enough
products could not be found, or the lock could not be obtained (and ``lock`` was True).
"""
result = []
amount = 0.0
if context is None:
context = {}
uom_obj = self.pool.get('product.uom')
uom_rounding = self.pool.get('product.product').browse(cr, uid, product_id, context=context).uom_id.rounding
if context.get('uom'):
uom_rounding = uom_obj.browse(cr, uid, context.get('uom'), context=context).rounding
for id in self.search(cr, uid, [('location_id', 'child_of', ids)]):
if lock:
try:
# Must lock with a separate select query because FOR UPDATE can't be used with
# aggregation/group by's (when individual rows aren't identifiable).
# We use a SAVEPOINT to be able to rollback this part of the transaction without
# failing the whole transaction in case the LOCK cannot be acquired.
cr.execute("SAVEPOINT stock_location_product_reserve")
cr.execute("""SELECT id FROM stock_move
WHERE product_id=%s AND
(
(location_dest_id=%s AND
location_id<>%s AND
state='done')
OR
(location_id=%s AND
location_dest_id<>%s AND
state in ('done', 'assigned'))
)
FOR UPDATE of stock_move NOWAIT""", (product_id, id, id, id, id), log_exceptions=False)
except Exception:
# Here it's likely that the FOR UPDATE NOWAIT failed to get the LOCK,
# so we ROLLBACK to the SAVEPOINT to restore the transaction to its earlier
# state, we return False as if the products were not available, and log it:
cr.execute("ROLLBACK TO stock_location_product_reserve")
logger = logging.getLogger('stock.location')
logger.warn("Failed attempt to reserve %s x product %s, likely due to another transaction already in progress. Next attempt is likely to work. Detailed error available at DEBUG level.", product_qty, product_id)
logger.debug("Trace of the failed product reservation attempt: ", exc_info=True)
return False
# XXX TODO: rewrite this with one single query, possibly even the quantity conversion
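# Quantities moved into this location (done moves only), grouped by UoM so
# they can be converted to a common unit below.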
cr.execute("""SELECT product_uom, sum(product_qty) AS product_qty
FROM stock_move
WHERE location_dest_id=%s AND
location_id<>%s AND
product_id=%s AND
state='done'
GROUP BY product_uom
""",
(id, id, product_id))
results = cr.dictfetchall()
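# Quantities leaving this location (done or already reserved moves), counted
# negatively and grouped by UoM as well.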
cr.execute("""SELECT product_uom,-sum(product_qty) AS product_qty
FROM stock_move
WHERE location_id=%s AND
location_dest_id<>%s AND
product_id=%s AND
state in ('done', 'assigned')
GROUP BY product_uom
""",
(id, id, product_id))
results += cr.dictfetchall()
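# Convert every per-UoM balance to the UoM requested in the context (or the
# product's default UoM) and accumulate the net quantity available here.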
total = 0.0
results2 = 0.0
for r in results:
amount = uom_obj._compute_qty(cr, uid, r['product_uom'], r['product_qty'], context.get('uom', False))
results2 += amount
total += amount
if total <= 0.0:
continue
amount = results2
compare_qty = float_compare(amount, 0, precision_rounding=uom_rounding)
if compare_qty == 1:
if amount > min(total, product_qty):
amount = min(product_qty, total)
result.append((amount, id))
product_qty -= amount
total -= amount
if product_qty <= 0.0:
return result
if total <= 0.0:
continue
return False
stock_location()
class stock_tracking(osv.osv):
_name = "stock.tracking"
_description = "Packs"
def checksum(sscc):
salt = '31' * 8 + '3'
sum = 0
for sscc_part, salt_part in zip(sscc, salt):
sum += int(sscc_part) * int(salt_part)
return (10 - (sum % 10)) % 10
checksum = staticmethod(checksum)
def make_sscc(self, cr, uid, context=None):
sequence = self.pool.get('ir.sequence').get(cr, uid, 'stock.lot.tracking')
try:
return sequence + str(self.checksum(sequence))
except Exception:
return sequence
_columns = {
'name': fields.char('Pack Reference', size=64, required=True, select=True, help="By default, the pack reference is generated following the sscc standard. (Serial number + 1 check digit)"),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide a pack without deleting it."),
'serial': fields.char('Additional Reference', size=64, select=True, help="Other reference or serial number"),
'move_ids': fields.one2many('stock.move', 'tracking_id', 'Moves for this pack', readonly=True),
'date': fields.datetime('Creation Date', required=True),
}
_defaults = {
'active': 1,
'name': make_sscc,
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
}
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
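# Match first on the exact additional reference (serial), then on the pack name.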
ids = self.search(cr, user, [('serial', '=', name)]+ args, limit=limit, context=context)
ids += self.search(cr, user, [('name', operator, name)]+ args, limit=limit, context=context)
return self.name_get(cr, user, ids, context)
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
res = [(r['id'], r['name']+' ['+(r['serial'] or '')+']') for r in self.read(cr, uid, ids, ['name', 'serial'], context)]
return res
def unlink(self, cr, uid, ids, context=None):
raise osv.except_osv(_('Error'), _('You cannot remove a lot line!'))
def action_traceability(self, cr, uid, ids, context=None):
""" It traces the information of a product
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return: A dictionary of values
"""
return self.pool.get('action.traceability').action_traceability(cr,uid,ids,context)
stock_tracking()
#----------------------------------------------------------
# Stock Picking
#----------------------------------------------------------
class stock_picking(osv.osv):
_name = "stock.picking"
_description = "Picking List"
def _set_maximum_date(self, cr, uid, ids, name, value, arg, context=None):
""" Calculates planned date if it is greater than 'value'.
@param name: Name of field
@param value: Value of field
@param arg: User defined argument
@return: True or False
"""
if not value:
return False
if isinstance(ids, (int, long)):
ids = [ids]
for pick in self.browse(cr, uid, ids, context=context):
sql_str = """update stock_move set
date='%s'
where
picking_id=%d """ % (value, pick.id)
if pick.max_date:
sql_str += " and (date='" + pick.max_date + "' or date>'" + value + "')"
cr.execute(sql_str)
return True
def _set_minimum_date(self, cr, uid, ids, name, value, arg, context=None):
""" Calculates planned date if it is less than 'value'.
@param name: Name of field
@param value: Value of field
@param arg: User defined argument
@return: True or False
"""
if not value:
return False
if isinstance(ids, (int, long)):
ids = [ids]
for pick in self.browse(cr, uid, ids, context=context):
sql_str = """update stock_move set
date='%s'
where
picking_id=%s """ % (value, pick.id)
if pick.min_date:
sql_str += " and (date='" + pick.min_date + "' or date<'" + value + "')"
cr.execute(sql_str)
return True
def get_min_max_date(self, cr, uid, ids, field_name, arg, context=None):
""" Finds minimum and maximum dates for picking.
@return: Dictionary of values
"""
res = {}
for id in ids:
res[id] = {'min_date': False, 'max_date': False}
if not ids:
return res
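# A single query fetches the earliest and latest expected dates of the moves,
# grouped per picking.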
cr.execute("""select
picking_id,
min(date_expected),
max(date_expected)
from
stock_move
where
picking_id IN %s
group by
picking_id""",(tuple(ids),))
for pick, dt1, dt2 in cr.fetchall():
res[pick]['min_date'] = dt1
res[pick]['max_date'] = dt2
return res
def create(self, cr, user, vals, context=None):
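# Generate the picking reference from the per-type sequence
# (stock.picking.in / .out / .internal) when no explicit name is given.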
if ('name' not in vals) or (vals.get('name')=='/'):
seq_obj_name = 'stock.picking.' + vals['type']
vals['name'] = self.pool.get('ir.sequence').get(cr, user, seq_obj_name)
new_id = super(stock_picking, self).create(cr, user, vals, context)
return new_id
_columns = {
'name': fields.char('Reference', size=64, select=True),
'origin': fields.char('Origin', size=64, help="Reference of the document that produced this picking.", select=True),
'backorder_id': fields.many2one('stock.picking', 'Back Order of', help="If this picking was split this field links to the picking that contains the other part that has been processed already.", select=True),
'type': fields.selection([('out', 'Sending Goods'), ('in', 'Getting Goods'), ('internal', 'Internal')], 'Shipping Type', required=True, select=True, help="Shipping type specifies whether goods are coming in or going out."),
'note': fields.text('Notes'),
'stock_journal_id': fields.many2one('stock.journal','Stock Journal', select=True),
'location_id': fields.many2one('stock.location', 'Location', help="Keep empty if you produce at the location where the finished products are needed." \
"Set a location if you produce at a fixed location. This can be a partner location " \
"if you subcontract the manufacturing operations.", select=True),
'location_dest_id': fields.many2one('stock.location', 'Dest. Location',help="Location where the system will stock the finished products.", select=True),
'move_type': fields.selection([('direct', 'Partial Delivery'), ('one', 'All at once')], 'Delivery Method', required=True, help="It specifies goods to be delivered all at once or by direct delivery"),
'state': fields.selection([
('draft', 'New'),
('auto', 'Waiting Another Operation'),
('confirmed', 'Waiting Availability'),
('assigned', 'Ready to Process'),
('done', 'Done'),
('cancel', 'Cancelled'),
], 'State', readonly=True, select=True,
help="* Draft: not confirmed yet and will not be scheduled until confirmed\n"\
"* Confirmed: still waiting for the availability of products\n"\
"* Available: products reserved, simply waiting for confirmation.\n"\
"* Waiting: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows)\n"\
"* Done: has been processed, can't be modified or cancelled anymore\n"\
"* Cancelled: has been cancelled, can't be confirmed anymore"),
'min_date': fields.function(get_min_max_date, fnct_inv=_set_minimum_date, multi="min_max_date",
store=True, type='datetime', string='Expected Date', select=1, help="Expected date for the picking to be processed"),
'date': fields.datetime('Order Date', help="Date of Order", select=True),
'date_done': fields.datetime('Date Done', help="Date of Completion"),
'max_date': fields.function(get_min_max_date, fnct_inv=_set_maximum_date, multi="min_max_date",
store=True, type='datetime', string='Max. Expected Date', select=2),
'move_lines': fields.one2many('stock.move', 'picking_id', 'Internal Moves', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}),
'auto_picking': fields.boolean('Auto-Picking'),
'address_id': fields.many2one('res.partner.address', 'Address', help="Address of partner"),
'partner_id': fields.related('address_id','partner_id',type='many2one',relation='res.partner',string='Partner',store=True),
'invoice_state': fields.selection([
("invoiced", "Invoiced"),
("2binvoiced", "To Be Invoiced"),
("none", "Not Applicable")], "Invoice Control",
select=True, required=True, readonly=True, states={'draft': [('readonly', False)]}),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True),
}
_defaults = {
'name': lambda self, cr, uid, context: '/',
'state': 'draft',
'move_type': 'direct',
'type': 'in',
'invoice_state': 'none',
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.picking', context=c)
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Reference must be unique per Company!'),
]
def action_process(self, cr, uid, ids, context=None):
if context is None: context = {}
context = dict(context, active_ids=ids, active_model=self._name)
partial_id = self.pool.get("stock.partial.picking").create(cr, uid, {}, context=context)
return {
'name':_("Products to Process"),
'view_mode': 'form',
'view_id': False,
'view_type': 'form',
'res_model': 'stock.partial.picking',
'res_id': partial_id,
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'new',
'domain': '[]',
'context': context,
}
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default = default.copy()
picking_obj = self.browse(cr, uid, id, context=context)
move_obj=self.pool.get('stock.move')
if ('name' not in default) or (picking_obj.name=='/'):
seq_obj_name = 'stock.picking.' + picking_obj.type
default['name'] = self.pool.get('ir.sequence').get(cr, uid, seq_obj_name)
default['origin'] = ''
default['backorder_id'] = False
if picking_obj.invoice_state == 'invoiced':
default['invoice_state'] = '2binvoiced'
res=super(stock_picking, self).copy(cr, uid, id, default, context)
if res:
picking_obj = self.browse(cr, uid, res, context=context)
for move in picking_obj.move_lines:
move_obj.write(cr, uid, [move.id], {'tracking_id': False,'prodlot_id':False, 'move_history_ids2': [(6, 0, [])], 'move_history_ids': [(6, 0, [])]})
return res
def onchange_partner_in(self, cr, uid, context=None, partner_id=None):
return {}
def action_explode(self, cr, uid, moves, context=None):
return moves
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms picking.
@return: True
"""
self.write(cr, uid, ids, {'state': 'confirmed'})
todo = []
for picking in self.browse(cr, uid, ids, context=context):
for r in picking.move_lines:
if r.state == 'draft':
todo.append(r.id)
self.log_picking(cr, uid, ids, context=context)
todo = self.action_explode(cr, uid, todo, context)
if len(todo):
self.pool.get('stock.move').action_confirm(cr, uid, todo, context=context)
return True
def test_auto_picking(self, cr, uid, ids):
# TODO: Check whether source and destination are the same location?
return True
def action_assign(self, cr, uid, ids, *args):
""" Changes state of picking to available if all moves are confirmed.
@return: True
"""
for pick in self.browse(cr, uid, ids):
move_ids = [x.id for x in pick.move_lines if x.state == 'confirmed']
if not move_ids:
raise osv.except_osv(_('Warning !'),_('Not enough stock, unable to reserve the products.'))
self.pool.get('stock.move').action_assign(cr, uid, move_ids)
return True
def force_assign(self, cr, uid, ids, *args):
""" Changes state of picking to available if moves are confirmed or waiting.
@return: True
"""
wf_service = netsvc.LocalService("workflow")
for pick in self.browse(cr, uid, ids):
move_ids = [x.id for x in pick.move_lines if x.state in ['confirmed','waiting']]
self.pool.get('stock.move').force_assign(cr, uid, move_ids)
wf_service.trg_write(uid, 'stock.picking', pick.id, cr)
return True
def draft_force_assign(self, cr, uid, ids, *args):
""" Confirms picking directly from draft state.
@return: True
"""
wf_service = netsvc.LocalService("workflow")
for pick in self.browse(cr, uid, ids):
if not pick.move_lines:
raise osv.except_osv(_('Error !'),_('You cannot process a picking without stock moves.'))
wf_service.trg_validate(uid, 'stock.picking', pick.id,
'button_confirm', cr)
return True
def draft_validate(self, cr, uid, ids, context=None):
""" Validates picking directly from draft state.
@return: True
"""
wf_service = netsvc.LocalService("workflow")
self.draft_force_assign(cr, uid, ids)
for pick in self.browse(cr, uid, ids, context=context):
move_ids = [x.id for x in pick.move_lines]
self.pool.get('stock.move').force_assign(cr, uid, move_ids)
wf_service.trg_write(uid, 'stock.picking', pick.id, cr)
return self.action_process(
cr, uid, ids, context=context)
def cancel_assign(self, cr, uid, ids, *args):
""" Cancels picking and moves.
@return: True
"""
wf_service = netsvc.LocalService("workflow")
for pick in self.browse(cr, uid, ids):
move_ids = [x.id for x in pick.move_lines]
self.pool.get('stock.move').cancel_assign(cr, uid, move_ids)
wf_service.trg_write(uid, 'stock.picking', pick.id, cr)
return True
def action_assign_wkf(self, cr, uid, ids, context=None):
""" Changes picking state to assigned.
@return: True
"""
self.write(cr, uid, ids, {'state': 'assigned'})
self.log_picking(cr, uid, ids, context=context)
return True
def test_finished(self, cr, uid, ids):
""" Tests whether the move is in done or cancel state or not.
@return: True or False
"""
move_ids = self.pool.get('stock.move').search(cr, uid, [('picking_id', 'in', ids)])
for move in self.pool.get('stock.move').browse(cr, uid, move_ids):
if move.state not in ('done', 'cancel'):
if move.product_qty != 0.0:
return False
else:
move.write({'state': 'done'})
return True
def test_assigned(self, cr, uid, ids):
""" Tests whether the move is in assigned state or not.
@return: True or False
"""
#TOFIX: assignment of move lines should be called before testing assignment, otherwise the picking never reaches the assigned state
ok = True
for pick in self.browse(cr, uid, ids):
mt = pick.move_type
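# 'one' (all at once) requires every move to be reserved, while 'direct'
# (partial delivery) is considered assigned as soon as one move with a
# non-zero quantity is available.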
for move in pick.move_lines:
if (move.state in ('confirmed', 'draft')) and (mt == 'one'):
return False
if (mt == 'direct') and (move.state == 'assigned') and (move.product_qty):
return True
ok = ok and (move.state in ('cancel', 'done', 'assigned'))
return ok
def action_cancel(self, cr, uid, ids, context=None):
""" Changes picking state to cancel.
@return: True
"""
for pick in self.browse(cr, uid, ids, context=context):
ids2 = [move.id for move in pick.move_lines]
self.pool.get('stock.move').action_cancel(cr, uid, ids2, context)
self.write(cr, uid, ids, {'state': 'cancel', 'invoice_state': 'none'})
self.log_picking(cr, uid, ids, context=context)
return True
#
# TODO: change and create a move if not parents
#
def action_done(self, cr, uid, ids, context=None):
""" Changes picking state to done.
@return: True
"""
self.write(cr, uid, ids, {'state': 'done', 'date_done': time.strftime('%Y-%m-%d %H:%M:%S')})
return True
def action_move(self, cr, uid, ids, context=None):
""" Changes move state to assigned.
@return: True
"""
for pick in self.browse(cr, uid, ids, context=context):
todo = []
for move in pick.move_lines:
if move.state == 'draft':
self.pool.get('stock.move').action_confirm(cr, uid, [move.id],
context=context)
todo.append(move.id)
elif move.state in ('assigned','confirmed'):
todo.append(move.id)
if len(todo):
self.pool.get('stock.move').action_done(cr, uid, todo,
context=context)
return True
def get_currency_id(self, cr, uid, picking):
return False
def _get_partner_to_invoice(self, cr, uid, picking, context=None):
""" Gets the partner that will be invoiced
Note that this function is inherited in the sale module
@param picking: object of the picking for which we are selecting the partner to invoice
@return: object of the partner to invoice
"""
return picking.address_id and picking.address_id.partner_id
def _get_comment_invoice(self, cr, uid, picking):
"""
@return: comment string for invoice
"""
return picking.note or ''
def _get_price_unit_invoice(self, cr, uid, move_line, type, context=None):
""" Gets price unit for invoice
@param move_line: Stock move lines
@param type: Type of invoice
@return: The price unit for the move line
"""
if context is None:
context = {}
if type in ('in_invoice', 'in_refund'):
# Take the user company and pricetype
context['currency_id'] = move_line.company_id.currency_id.id
amount_unit = move_line.product_id.price_get('standard_price', context=context)[move_line.product_id.id]
return amount_unit
else:
return move_line.product_id.list_price
def _get_discount_invoice(self, cr, uid, move_line):
'''Return the discount for the move line'''
return 0.0
def _get_taxes_invoice(self, cr, uid, move_line, type):
""" Gets taxes on invoice
@param move_line: Stock move lines
@param type: Type of invoice
@return: Taxes Ids for the move line
"""
if type in ('in_invoice', 'in_refund'):
taxes = move_line.product_id.supplier_taxes_id
else:
taxes = move_line.product_id.taxes_id
if move_line.picking_id and move_line.picking_id.address_id and move_line.picking_id.address_id.partner_id:
return self.pool.get('account.fiscal.position').map_tax(
cr,
uid,
move_line.picking_id.address_id.partner_id.property_account_position,
taxes
)
else:
return map(lambda x: x.id, taxes)
def _get_account_analytic_invoice(self, cr, uid, picking, move_line):
return False
def _invoice_line_hook(self, cr, uid, move_line, invoice_line_id):
'''Call after the creation of the invoice line'''
return
def _invoice_hook(self, cr, uid, picking, invoice_id):
'''Call after the creation of the invoice'''
return
def _get_invoice_type(self, pick):
src_usage = dest_usage = None
inv_type = None
if pick.invoice_state == '2binvoiced':
if pick.move_lines:
src_usage = pick.move_lines[0].location_id.usage
dest_usage = pick.move_lines[0].location_dest_id.usage
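# Map the picking direction and the counterpart location usage to an invoice
# type: sending to a supplier -> supplier refund, sending to a customer ->
# customer invoice, receiving from a supplier -> supplier invoice, receiving
# from a customer -> customer refund.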
if pick.type == 'out' and dest_usage == 'supplier':
inv_type = 'in_refund'
elif pick.type == 'out' and dest_usage == 'customer':
inv_type = 'out_invoice'
elif pick.type == 'in' and src_usage == 'supplier':
inv_type = 'in_invoice'
elif pick.type == 'in' and src_usage == 'customer':
inv_type = 'out_refund'
else:
inv_type = 'out_invoice'
return inv_type
def _prepare_invoice_group(self, cr, uid, picking, partner, invoice, context=None):
""" Builds the dict for grouped invoices
@param picking: picking object
@param partner: object of the partner to invoice (not used here, but may be useful if this function is inherited)
@param invoice: object of the invoice that we are updating
@return: dict that will be used to update the invoice
"""
comment = self._get_comment_invoice(cr, uid, picking)
return {
'name': (invoice.name or '') + ', ' + (picking.name or ''),
'origin': (invoice.origin or '') + ', ' + (picking.name or '') + (picking.origin and (':' + picking.origin) or ''),
'comment': (comment and (invoice.comment and invoice.comment + "\n" + comment or comment)) or (invoice.comment and invoice.comment or ''),
'date_invoice': context.get('date_inv', False),
'user_id': uid,
}
def _prepare_invoice(self, cr, uid, picking, partner, inv_type, journal_id, context=None):
""" Builds the dict containing the values for the invoice
@param picking: picking object
@param partner: object of the partner to invoice
@param inv_type: type of the invoice ('out_invoice', 'in_invoice', ...)
@param journal_id: ID of the accounting journal
@return: dict that will be used to create the invoice object
"""
if inv_type in ('out_invoice', 'out_refund'):
account_id = partner.property_account_receivable.id
else:
account_id = partner.property_account_payable.id
address_contact_id, address_invoice_id = \
self.pool.get('res.partner').address_get(cr, uid, [partner.id],
['contact', 'invoice']).values()
comment = self._get_comment_invoice(cr, uid, picking)
invoice_vals = {
'name': picking.name,
'origin': (picking.name or '') + (picking.origin and (':' + picking.origin) or ''),
'type': inv_type,
'account_id': account_id,
'partner_id': partner.id,
'address_invoice_id': address_invoice_id,
'address_contact_id': address_contact_id,
'comment': comment,
'payment_term': partner.property_payment_term and partner.property_payment_term.id or False,
'fiscal_position': partner.property_account_position.id,
'date_invoice': context.get('date_inv', False),
'company_id': picking.company_id.id,
'user_id': uid,
}
cur_id = self.get_currency_id(cr, uid, picking)
if cur_id:
invoice_vals['currency_id'] = cur_id
if journal_id:
invoice_vals['journal_id'] = journal_id
return invoice_vals
def _prepare_invoice_line(self, cr, uid, group, picking, move_line, invoice_id,
invoice_vals, context=None):
""" Builds the dict containing the values for the invoice line
@param group: True or False
@param picking: picking object
@param: move_line: move_line object
@param: invoice_id: ID of the related invoice
@param: invoice_vals: dict used to created the invoice
@return: dict that will be used to create the invoice line
"""
if group:
name = (picking.name or '') + '-' + move_line.name
else:
name = move_line.name
origin = move_line.picking_id.name or ''
if move_line.picking_id.origin:
origin += ':' + move_line.picking_id.origin
if invoice_vals['type'] in ('out_invoice', 'out_refund'):
account_id = move_line.product_id.product_tmpl_id.\
property_account_income.id
if not account_id:
account_id = move_line.product_id.categ_id.\
property_account_income_categ.id
else:
account_id = move_line.product_id.product_tmpl_id.\
property_account_expense.id
if not account_id:
account_id = move_line.product_id.categ_id.\
property_account_expense_categ.id
if invoice_vals['fiscal_position']:
fp_obj = self.pool.get('account.fiscal.position')
fiscal_position = fp_obj.browse(cr, uid, invoice_vals['fiscal_position'], context=context)
account_id = fp_obj.map_account(cr, uid, fiscal_position, account_id)
# set UoS if it's a sale and the move doesn't have one
uos_id = move_line.product_uos and move_line.product_uos.id or False
if not uos_id and invoice_vals['type'] in ('out_invoice', 'out_refund'):
uos_id = move_line.product_uom.id
return {
'name': name,
'origin': origin,
'invoice_id': invoice_id,
'uos_id': uos_id,
'product_id': move_line.product_id.id,
'account_id': account_id,
'price_unit': self._get_price_unit_invoice(cr, uid, move_line, invoice_vals['type']),
'discount': self._get_discount_invoice(cr, uid, move_line),
'quantity': move_line.product_uos_qty or move_line.product_qty,
'invoice_line_tax_id': [(6, 0, self._get_taxes_invoice(cr, uid, move_line, invoice_vals['type']))],
'account_analytic_id': self._get_account_analytic_invoice(cr, uid, picking, move_line),
}
def action_invoice_create(self, cr, uid, ids, journal_id=False,
group=False, type='out_invoice', context=None):
""" Creates invoice based on the invoice state selected for picking.
@param journal_id: Id of journal
@param group: Whether to create a group invoice or not
@param type: Type invoice to be created
@return: Ids of created invoices for the pickings
"""
if context is None:
context = {}
invoice_obj = self.pool.get('account.invoice')
invoice_line_obj = self.pool.get('account.invoice.line')
invoices_group = {}
res = {}
inv_type = type
for picking in self.browse(cr, uid, ids, context=context):
if picking.invoice_state != '2binvoiced':
continue
partner = self._get_partner_to_invoice(cr, uid, picking, context=context)
if not partner:
raise osv.except_osv(_('Error, no partner !'),
_('Please put a partner on the picking list if you want to generate an invoice.'))
if not inv_type:
inv_type = self._get_invoice_type(picking)
if group and partner.id in invoices_group:
invoice_id = invoices_group[partner.id]
invoice = invoice_obj.browse(cr, uid, invoice_id)
invoice_vals_group = self._prepare_invoice_group(cr, uid, picking, partner, invoice, context=context)
invoice_obj.write(cr, uid, [invoice_id], invoice_vals_group, context=context)
else:
invoice_vals = self._prepare_invoice(cr, uid, picking, partner, inv_type, journal_id, context=context)
invoice_id = invoice_obj.create(cr, uid, invoice_vals, context=context)
invoices_group[partner.id] = invoice_id
res[picking.id] = invoice_id
for move_line in picking.move_lines:
if move_line.state == 'cancel':
continue
vals = self._prepare_invoice_line(cr, uid, group, picking, move_line,
invoice_id, invoice_vals, context=context)
if vals:
invoice_line_id = invoice_line_obj.create(cr, uid, vals, context=context)
self._invoice_line_hook(cr, uid, move_line, invoice_line_id)
invoice_obj.button_compute(cr, uid, [invoice_id], context=context,
set_total=(inv_type in ('in_invoice', 'in_refund')))
self.write(cr, uid, [picking.id], {
'invoice_state': 'invoiced',
}, context=context)
self._invoice_hook(cr, uid, picking, invoice_id)
self.write(cr, uid, res.keys(), {
'invoice_state': 'invoiced',
}, context=context)
return res
def test_done(self, cr, uid, ids, context=None):
""" Test whether the move lines are done or not.
@return: True or False
"""
ok = False
for pick in self.browse(cr, uid, ids, context=context):
if not pick.move_lines:
return True
for move in pick.move_lines:
if move.state not in ('cancel','done'):
return False
if move.state=='done':
ok = True
return ok
def test_cancel(self, cr, uid, ids, context=None):
""" Test whether the move lines are canceled or not.
@return: True or False
"""
for pick in self.browse(cr, uid, ids, context=context):
for move in pick.move_lines:
if move.state not in ('cancel',):
return False
return True
def allow_cancel(self, cr, uid, ids, context=None):
for pick in self.browse(cr, uid, ids, context=context):
if not pick.move_lines:
return True
for move in pick.move_lines:
if move.state == 'done':
raise osv.except_osv(_('Error'), _('You cannot cancel picking because stock move is in done state !'))
return True
def unlink(self, cr, uid, ids, context=None):
move_obj = self.pool.get('stock.move')
if context is None:
context = {}
for pick in self.browse(cr, uid, ids, context=context):
if pick.state in ['done','cancel']:
raise osv.except_osv(_('Error'), _('You cannot remove the picking which is in %s state !')%(pick.state,))
else:
ids2 = [move.id for move in pick.move_lines]
ctx = context.copy()
ctx.update({'call_unlink':True})
if pick.state != 'draft':
# Cancel the moves first so that the virtual stock of the products is updated
move_obj.action_cancel(cr, uid, ids2, ctx)
#Removing the move
move_obj.unlink(cr, uid, ids2, ctx)
return super(stock_picking, self).unlink(cr, uid, ids, context=context)
# FIXME: needs refactoring, this code is partially duplicated in stock_move.do_partial()!
def do_partial(self, cr, uid, ids, partial_datas, context=None):
""" Makes partial picking and moves done.
@param partial_datas : Dictionary containing details of partial picking
like partner_id, address_id, delivery_date,
delivery moves with product_id, product_qty, uom
@return: Dictionary of values
"""
if context is None:
context = {}
else:
context = dict(context)
res = {}
move_obj = self.pool.get('stock.move')
product_obj = self.pool.get('product.product')
currency_obj = self.pool.get('res.currency')
uom_obj = self.pool.get('product.uom')
sequence_obj = self.pool.get('ir.sequence')
wf_service = netsvc.LocalService("workflow")
for pick in self.browse(cr, uid, ids, context=context):
new_picking = None
complete, too_many, too_few = [], [], []
move_product_qty, prodlot_ids, product_avail, partial_qty, product_uoms = {}, {}, {}, {}, {}
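# Classify each move according to the quantity processed in the wizard:
# 'complete' if it matches the move quantity, 'too_few' if only part of it
# was processed, 'too_many' if more than planned was processed.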
for move in pick.move_lines:
if move.state in ('done', 'cancel'):
continue
partial_data = partial_datas.get('move%s'%(move.id), {})
product_qty = partial_data.get('product_qty',0.0)
move_product_qty[move.id] = product_qty
product_uom = partial_data.get('product_uom',False)
product_price = partial_data.get('product_price',0.0)
product_currency = partial_data.get('product_currency',False)
prodlot_id = partial_data.get('prodlot_id')
prodlot_ids[move.id] = prodlot_id
product_uoms[move.id] = product_uom
partial_qty[move.id] = uom_obj._compute_qty(cr, uid, product_uoms[move.id], product_qty, move.product_uom.id)
if move.product_qty == partial_qty[move.id]:
complete.append(move)
elif move.product_qty > partial_qty[move.id]:
too_few.append(move)
else:
too_many.append(move)
# Average price computation
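# For incoming pickings of average-costed products, the standard price is
# recomputed as the weighted average of the quantity already on hand (at the
# old price) and the received quantity (at the wizard price, converted to the
# company currency and the product's UoM).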
if (pick.type == 'in') and (move.product_id.cost_method == 'average'):
product = product_obj.browse(cr, uid, move.product_id.id)
move_currency_id = move.company_id.currency_id.id
context['currency_id'] = move_currency_id
qty = uom_obj._compute_qty(cr, uid, product_uom, product_qty, product.uom_id.id)
if product.id in product_avail:
product_avail[product.id] += qty
else:
product_avail[product.id] = product.qty_available
if qty > 0:
new_price = currency_obj.compute(cr, uid, product_currency,
move_currency_id, product_price)
new_price = uom_obj._compute_price(cr, uid, product_uom, new_price,
product.uom_id.id)
if product.qty_available <= 0:
new_std_price = new_price
else:
# Get the standard price
amount_unit = product.price_get('standard_price', context=context)[product.id]
new_std_price = ((amount_unit * product_avail[product.id])\
+ (new_price * qty))/(product_avail[product.id] + qty)
# Write the field according to price type field
product_obj.write(cr, uid, [product.id], {'standard_price': new_std_price})
# Record the values that were chosen in the wizard, so they can be
# used for inventory valuation if real-time valuation is enabled.
move_obj.write(cr, uid, [move.id],
{'price_unit': product_price,
'price_currency_id': product_currency})
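# Partially processed moves: the processed quantity is copied onto a new
# picking (created on demand and validated below), while the original move
# keeps the remaining quantity and the original picking points to the
# processed one through backorder_id.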
for move in too_few:
product_qty = move_product_qty[move.id]
if not new_picking:
new_picking = self.copy(cr, uid, pick.id,
{
'name': sequence_obj.get(cr, uid, 'stock.picking.%s'%(pick.type)),
'move_lines' : [],
'state':'draft',
})
if product_qty != 0:
defaults = {
'product_qty' : product_qty,
'product_uos_qty': product_qty, #TODO: put correct uos_qty
'picking_id' : new_picking,
'state': 'assigned',
'move_dest_id': False,
'price_unit': move.price_unit,
'product_uom': product_uoms[move.id]
}
prodlot_id = prodlot_ids[move.id]
if prodlot_id:
defaults.update(prodlot_id=prodlot_id)
move_obj.copy(cr, uid, move.id, defaults)
move_obj.write(cr, uid, [move.id],
{
'product_qty' : move.product_qty - partial_qty[move.id],
'product_uos_qty': move.product_qty - partial_qty[move.id], #TODO: put correct uos_qty
})
if new_picking:
move_obj.write(cr, uid, [c.id for c in complete], {'picking_id': new_picking})
for move in complete:
defaults = {'product_uom': product_uoms[move.id], 'product_qty': move_product_qty[move.id]}
if prodlot_ids.get(move.id):
defaults.update({'prodlot_id': prodlot_ids[move.id]})
move_obj.write(cr, uid, [move.id], defaults)
for move in too_many:
product_qty = move_product_qty[move.id]
defaults = {
'product_qty' : product_qty,
'product_uos_qty': product_qty, #TODO: put correct uos_qty
'product_uom': product_uoms[move.id]
}
prodlot_id = prodlot_ids.get(move.id)
if prodlot_ids.get(move.id):
defaults.update(prodlot_id=prodlot_id)
if new_picking:
defaults.update(picking_id=new_picking)
move_obj.write(cr, uid, [move.id], defaults)
# At first we confirm the new picking (if necessary)
if new_picking:
wf_service.trg_validate(uid, 'stock.picking', new_picking, 'button_confirm', cr)
# Then we finish the good picking
self.write(cr, uid, [pick.id], {'backorder_id': new_picking})
self.action_move(cr, uid, [new_picking])
wf_service.trg_validate(uid, 'stock.picking', new_picking, 'button_done', cr)
wf_service.trg_write(uid, 'stock.picking', pick.id, cr)
delivered_pack_id = new_picking
else:
self.action_move(cr, uid, [pick.id])
wf_service.trg_validate(uid, 'stock.picking', pick.id, 'button_done', cr)
delivered_pack_id = pick.id
delivered_pack = self.browse(cr, uid, delivered_pack_id, context=context)
res[pick.id] = {'delivered_picking': delivered_pack.id or False}
return res
def log_picking(self, cr, uid, ids, context=None):
""" This function will create log messages for picking.
@param cr: the database cursor
@param uid: the current user's ID for security checks,
@param ids: List of Picking Ids
@param context: A standard dictionary for contextual values
"""
if context is None:
context = {}
data_obj = self.pool.get('ir.model.data')
for pick in self.browse(cr, uid, ids, context=context):
msg=''
if pick.auto_picking:
continue
type_list = {
'out':_("Delivery Order"),
'in':_('Reception'),
'internal': _('Internal picking'),
}
view_list = {
'out': 'view_picking_out_form',
'in': 'view_picking_in_form',
'internal': 'view_picking_form',
}
message = type_list.get(pick.type, _('Document')) + " '" + (pick.name or '?') + "' "
if pick.min_date:
msg= _(' for the ')+ datetime.strptime(pick.min_date, '%Y-%m-%d %H:%M:%S').strftime('%m/%d/%Y')
state_list = {
'confirmed': _('is scheduled %s.') % msg,
'assigned': _('is ready to process.'),
'cancel': _('is cancelled.'),
'done': _('is done.'),
'auto': _('is waiting.'),
'draft': _('is in draft state.'),
}
res = data_obj.get_object_reference(cr, uid, 'stock', view_list.get(pick.type, 'view_picking_form'))
context.update({'view_id': res and res[1] or False})
message += state_list[pick.state]
self.log(cr, uid, pick.id, message, context=context)
return True
stock_picking()
class stock_production_lot(osv.osv):
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
reads = self.read(cr, uid, ids, ['name', 'prefix', 'ref'], context)
res = []
for record in reads:
name = record['name']
prefix = record['prefix']
if prefix:
name = prefix + '/' + name
if record['ref']:
name = '%s [%s]' % (name, record['ref'])
res.append((record['id'], name))
return res
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
args = args or []
ids = []
if name:
ids = self.search(cr, uid, [('prefix', '=', name)] + args, limit=limit, context=context)
if not ids:
ids = self.search(cr, uid, [('name', operator, name)] + args, limit=limit, context=context)
else:
ids = self.search(cr, uid, args, limit=limit, context=context)
return self.name_get(cr, uid, ids, context)
_name = 'stock.production.lot'
_description = 'Production lot'
def _get_stock(self, cr, uid, ids, field_name, arg, context=None):
""" Gets stock of products for locations
@return: Dictionary of values
"""
if context is None:
context = {}
if 'location_id' not in context:
locations = self.pool.get('stock.location').search(cr, uid, [('usage', '=', 'internal')], context=context)
else:
locations = context['location_id'] and [context['location_id']] or []
if isinstance(ids, (int, long)):
ids = [ids]
res = {}.fromkeys(ids, 0.0)
if locations:
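# Aggregate the quantities per production lot from the stock_report_prodlots
# view, restricted to the relevant locations and to the requested lots.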
cr.execute('''select
prodlot_id,
sum(qty)
from
stock_report_prodlots
where
location_id IN %s and prodlot_id IN %s group by prodlot_id''',(tuple(locations),tuple(ids),))
res.update(dict(cr.fetchall()))
return res
def _stock_search(self, cr, uid, obj, name, args, context=None):
""" Searches Ids of products
@return: Ids of locations
"""
locations = self.pool.get('stock.location').search(cr, uid, [('usage', '=', 'internal')])
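# The comparison operator and value come straight from the search domain
# (args[0]) and are appended as-is to the HAVING clause.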
cr.execute('''select
prodlot_id,
sum(qty)
from
stock_report_prodlots
where
location_id IN %s group by prodlot_id
having sum(qty) '''+ str(args[0][1]) + str(args[0][2]),(tuple(locations),))
res = cr.fetchall()
ids = [('id', 'in', map(lambda x: x[0], res))]
return ids
_columns = {
'name': fields.char('Production Lot', size=64, required=True, help="Unique production lot, will be displayed as: PREFIX/SERIAL [INT_REF]"),
'ref': fields.char('Internal Reference', size=256, help="Internal reference number in case it differs from the manufacturer's serial number"),
'prefix': fields.char('Prefix', size=64, help="Optional prefix to prepend when displaying this serial number: PREFIX/SERIAL [INT_REF]"),
'product_id': fields.many2one('product.product', 'Product', required=True, domain=[('type', '<>', 'service')]),
'date': fields.datetime('Creation Date', required=True),
'stock_available': fields.function(_get_stock, fnct_search=_stock_search, type="float", string="Available", select=True,
help="Current quantity of products with this Production Lot Number available in company warehouses",
digits_compute=dp.get_precision('Product UoM')),
'revisions': fields.one2many('stock.production.lot.revision', 'lot_id', 'Revisions'),
'company_id': fields.many2one('res.company', 'Company', select=True),
'move_ids': fields.one2many('stock.move', 'prodlot_id', 'Moves for this production lot', readonly=True),
}
_defaults = {
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'name': lambda x, y, z, c: x.pool.get('ir.sequence').get(y, z, 'stock.lot.serial'),
'product_id': lambda x, y, z, c: c.get('product_id', False),
}
_sql_constraints = [
('name_ref_uniq', 'unique (name, ref)', 'The combination of serial number and internal reference must be unique !'),
]
def action_traceability(self, cr, uid, ids, context=None):
""" It traces the information of a product
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return: A dictionary of values
"""
value=self.pool.get('action.traceability').action_traceability(cr,uid,ids,context)
return value
stock_production_lot()
class stock_production_lot_revision(osv.osv):
_name = 'stock.production.lot.revision'
_description = 'Production lot revisions'
_columns = {
'name': fields.char('Revision Name', size=64, required=True),
'description': fields.text('Description'),
'date': fields.date('Revision Date'),
'indice': fields.char('Revision Number', size=16),
'author_id': fields.many2one('res.users', 'Author'),
'lot_id': fields.many2one('stock.production.lot', 'Production lot', select=True, ondelete='cascade'),
'company_id': fields.related('lot_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
}
_defaults = {
'author_id': lambda x, y, z, c: z,
'date': fields.date.context_today,
}
stock_production_lot_revision()
# ----------------------------------------------------
# Move
# ----------------------------------------------------
#
# Fields:
# location_dest_id is only used for predicting future stocks
#
class stock_move(osv.osv):
def _getSSCC(self, cr, uid, context=None):
cr.execute('select id from stock_tracking where create_uid=%s order by id desc limit 1', (uid,))
res = cr.fetchone()
return (res and res[0]) or False
_name = "stock.move"
_description = "Stock Move"
_order = 'date_expected desc, id'
_log_create = False
def action_partial_move(self, cr, uid, ids, context=None):
if context is None: context = {}
if context.get('active_model') != self._name:
context.update(active_ids=ids, active_model=self._name)
partial_id = self.pool.get("stock.partial.move").create(
cr, uid, {}, context=context)
return {
'name':_("Products to Process"),
'view_mode': 'form',
'view_id': False,
'view_type': 'form',
'res_model': 'stock.partial.move',
'res_id': partial_id,
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'new',
'domain': '[]',
'context': context
}
def name_get(self, cr, uid, ids, context=None):
res = []
for line in self.browse(cr, uid, ids, context=context):
res.append((line.id, (line.product_id.code or '/')+': '+line.location_id.name+' > '+line.location_dest_id.name))
return res
def _check_tracking(self, cr, uid, ids, context=None):
""" Checks if production lot is assigned to stock move or not.
@return: True or False
"""
for move in self.browse(cr, uid, ids, context=context):
if not move.prodlot_id and \
(move.state == 'done' and \
( \
(move.product_id.track_production and move.location_id.usage == 'production') or \
(move.product_id.track_production and move.location_dest_id.usage == 'production') or \
(move.product_id.track_incoming and move.location_id.usage == 'supplier') or \
(move.product_id.track_outgoing and move.location_dest_id.usage == 'customer') \
)):
return False
return True
def _check_product_lot(self, cr, uid, ids, context=None):
""" Checks whether move is done or not and production lot is assigned to that move.
@return: True or False
"""
for move in self.browse(cr, uid, ids, context=context):
if move.prodlot_id and move.state == 'done' and (move.prodlot_id.product_id.id != move.product_id.id):
return False
return True
_columns = {
'name': fields.char('Name', size=250, required=True, select=True),
'priority': fields.selection([('0', 'Not urgent'), ('1', 'Urgent')], 'Priority'),
'create_date': fields.datetime('Creation Date', readonly=True, select=True),
'date': fields.datetime('Date', required=True, select=True, help="Move date: scheduled date until move is done, then date of actual move processing", states={'done': [('readonly', True)]}),
'date_expected': fields.datetime('Scheduled Date', states={'done': [('readonly', True)]},required=True, select=True, help="Scheduled date for the processing of this move"),
'product_id': fields.many2one('product.product', 'Product', required=True, select=True, domain=[('type','<>','service')],states={'done': [('readonly', True)]}),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product UoM'), required=True,states={'done': [('readonly', True)]}),
'product_uom': fields.many2one('product.uom', 'Unit of Measure', required=True,states={'done': [('readonly', True)]}),
'product_uos_qty': fields.float('Quantity (UOS)', digits_compute=dp.get_precision('Product UoM'), states={'done': [('readonly', True)]}),
'product_uos': fields.many2one('product.uom', 'Product UOS', states={'done': [('readonly', True)]}),
'product_packaging': fields.many2one('product.packaging', 'Packaging', help="It specifies attributes of packaging like type, quantity of packaging,etc."),
'location_id': fields.many2one('stock.location', 'Source Location', required=True, select=True,states={'done': [('readonly', True)]}, help="Sets a location if you produce at a fixed location. This can be a partner location if you subcontract the manufacturing operations."),
'location_dest_id': fields.many2one('stock.location', 'Destination Location', required=True,states={'done': [('readonly', True)]}, select=True, help="Location where the system will stock the finished products."),
'address_id': fields.many2one('res.partner.address', 'Destination Address ', states={'done': [('readonly', True)]}, help="Optional address where goods are to be delivered, specifically used for allotment"),
'prodlot_id': fields.many2one('stock.production.lot', 'Production Lot', states={'done': [('readonly', True)]}, help="Production lot is used to put a serial number on the production", select=True),
'tracking_id': fields.many2one('stock.tracking', 'Pack', select=True, states={'done': [('readonly', True)]}, help="Logistical shipping unit: pallet, box, pack ..."),
'auto_validate': fields.boolean('Auto Validate'),
'move_dest_id': fields.many2one('stock.move', 'Destination Move', help="Optional: next stock move when chaining them", select=True),
'move_history_ids': fields.many2many('stock.move', 'stock_move_history_ids', 'parent_id', 'child_id', 'Move History (child moves)'),
'move_history_ids2': fields.many2many('stock.move', 'stock_move_history_ids', 'child_id', 'parent_id', 'Move History (parent moves)'),
'picking_id': fields.many2one('stock.picking', 'Reference', select=True,states={'done': [('readonly', True)]}),
'note': fields.text('Notes'),
'state': fields.selection([('draft', 'New'), ('waiting', 'Waiting Another Move'), ('confirmed', 'Waiting Availability'), ('assigned', 'Available'), ('done', 'Done'), ('cancel', 'Cancelled')], 'State', readonly=True, select=True,
help='When the stock move is created it is in the \'Draft\' state.\n After that, it is set to \'Not Available\' state if the scheduler did not find the products.\n When products are reserved it is set to \'Available\'.\n When the picking is done the state is \'Done\'.\
\nThe state is \'Waiting\' if the move is waiting for another one.'),
'price_unit': fields.float('Unit Price', digits_compute= dp.get_precision('Account'), help="Technical field used to record the product cost set by the user during a picking confirmation (when average price costing method is used)"),
'price_currency_id': fields.many2one('res.currency', 'Currency for average price', help="Technical field used to record the currency chosen by the user during a picking confirmation (when average price costing method is used)"),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True),
'partner_id': fields.related('picking_id','address_id','partner_id',type='many2one', relation="res.partner", string="Partner", store=True, select=True),
'backorder_id': fields.related('picking_id','backorder_id',type='many2one', relation="stock.picking", string="Back Order", select=True),
'origin': fields.related('picking_id','origin',type='char', size=64, relation="stock.picking", string="Origin", store=True),
# used for colors in tree views:
'scrapped': fields.related('location_dest_id','scrap_location',type='boolean',relation='stock.location',string='Scrapped', readonly=True),
}
def _check_location(self, cr, uid, ids, context=None):
for record in self.browse(cr, uid, ids, context=context):
if (record.state=='done') and (record.location_dest_id.usage == 'view' or record.location_id.usage == 'view'):
return False
return True
_constraints = [
(_check_tracking,
'You must assign a production lot for this product',
['prodlot_id']),
(_check_location, 'You can not move products from or to a location of the type view.',
['location_id','location_dest_id']),
(_check_product_lot,
'You are trying to assign a lot which does not belong to the same product',
['prodlot_id'])]
def _default_location_destination(self, cr, uid, context=None):
""" Gets default address of partner for destination location
@return: Address id or False
"""
mod_obj = self.pool.get('ir.model.data')
if context is None:
context = {}
picking_type = context.get('picking_type')
location_id = False
if context.get('move_line', []):
if context['move_line'][0]:
if isinstance(context['move_line'][0], (tuple, list)):
location_id = context['move_line'][0][2] and context['move_line'][0][2].get('location_dest_id',False)
else:
move_list = self.pool.get('stock.move').read(cr, uid, context['move_line'][0], ['location_dest_id'])
location_id = move_list and move_list['location_dest_id'][0] or False
elif context.get('address_out_id', False):
property_out = self.pool.get('res.partner.address').browse(cr, uid, context['address_out_id'], context).partner_id.property_stock_customer
location_id = property_out and property_out.id or False
else:
location_xml_id = False
if picking_type == 'in':
location_xml_id = 'stock_location_stock'
elif picking_type == 'out':
location_xml_id = 'stock_location_customers'
if location_xml_id:
location_model, location_id = mod_obj.get_object_reference(cr, uid, 'stock', location_xml_id)
return location_id
def _default_location_source(self, cr, uid, context=None):
""" Gets default address of partner for source location
@return: Address id or False
"""
        mod_obj = self.pool.get('ir.model.data')
        if context is None:
            context = {}
        picking_type = context.get('picking_type')
        location_id = False
if context.get('move_line', []):
            try:
                location_id = context['move_line'][0][2]['location_id']
            except:
                # move_line may hold plain ids or one2many command tuples without
                # a values dict; silently fall back to the defaults computed below
                pass
elif context.get('address_in_id', False):
part_obj_add = self.pool.get('res.partner.address').browse(cr, uid, context['address_in_id'], context=context)
if part_obj_add.partner_id:
location_id = part_obj_add.partner_id.property_stock_supplier.id
else:
location_xml_id = False
if picking_type == 'in':
location_xml_id = 'stock_location_suppliers'
elif picking_type == 'out':
location_xml_id = 'stock_location_stock'
if location_xml_id:
location_model, location_id = mod_obj.get_object_reference(cr, uid, 'stock', location_xml_id)
return location_id
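    # Illustrative call (descriptive comment, not part of the original module;
    # the returned id depends on the installed data): with
    #   context={'picking_type': 'in'}
    # and no move_line/address_in_id keys, this resolves the XML id
    # 'stock.stock_location_suppliers' and returns its database id.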
_defaults = {
'location_id': _default_location_source,
'location_dest_id': _default_location_destination,
'state': 'draft',
'priority': '1',
'product_qty': 1.0,
'scrapped' : False,
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.move', context=c),
'date_expected': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
}
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
if uid != 1:
frozen_fields = set(['product_qty', 'product_uom', 'product_uos_qty', 'product_uos', 'location_id', 'location_dest_id', 'product_id'])
for move in self.browse(cr, uid, ids, context=context):
if move.state == 'done':
if frozen_fields.intersection(vals):
raise osv.except_osv(_('Operation forbidden'),
_('Quantities, UoMs, Products and Locations cannot be modified on stock moves that have already been processed (except by the Administrator)'))
return super(stock_move, self).write(cr, uid, ids, vals, context=context)
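    # Usage note (descriptive comment, not part of the original module): for a
    # non-admin uid, e.g.
    #   self.pool.get('stock.move').write(cr, uid, [move_id], {'product_qty': 5.0})
    # is rejected with "Operation forbidden" once the move is in state 'done';
    # the same call with uid == 1 (Administrator) is still allowed.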
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default = default.copy()
default.update({'move_history_ids2': [], 'move_history_ids': []})
return super(stock_move, self).copy(cr, uid, id, default, context=context)
def _auto_init(self, cursor, context=None):
res = super(stock_move, self)._auto_init(cursor, context=context)
cursor.execute('SELECT indexname \
FROM pg_indexes \
WHERE indexname = \'stock_move_location_id_location_dest_id_product_id_state\'')
if not cursor.fetchone():
cursor.execute('CREATE INDEX stock_move_location_id_location_dest_id_product_id_state \
ON stock_move (product_id, state, location_id, location_dest_id)')
return res
def onchange_lot_id(self, cr, uid, ids, prodlot_id=False, product_qty=False,
loc_id=False, product_id=False, uom_id=False, context=None):
""" On change of production lot gives a warning message.
@param prodlot_id: Changed production lot id
@param product_qty: Quantity of product
@param loc_id: Location id
@param product_id: Product id
@return: Warning message
"""
if not prodlot_id or not loc_id:
return {}
ctx = context and context.copy() or {}
ctx['location_id'] = loc_id
ctx.update({'raise-exception': True})
uom_obj = self.pool.get('product.uom')
product_obj = self.pool.get('product.product')
product_uom = product_obj.browse(cr, uid, product_id, context=ctx).uom_id
prodlot = self.pool.get('stock.production.lot').browse(cr, uid, prodlot_id, context=ctx)
location = self.pool.get('stock.location').browse(cr, uid, loc_id, context=ctx)
uom = uom_obj.browse(cr, uid, uom_id, context=ctx)
amount_actual = uom_obj._compute_qty_obj(cr, uid, product_uom, prodlot.stock_available, uom, context=ctx)
warning = {}
if (location.usage == 'internal') and (product_qty > (amount_actual or 0.0)):
warning = {
'title': _('Insufficient Stock in Lot !'),
'message': _('You are moving %.2f %s products but only %.2f %s available in this lot.') % (product_qty, uom.name, amount_actual, uom.name)
}
return {'warning': warning}
def onchange_quantity(self, cr, uid, ids, product_id, product_qty,
product_uom, product_uos):
""" On change of product quantity finds UoM and UoS quantities
@param product_id: Product id
@param product_qty: Changed Quantity of product
@param product_uom: Unit of measure of product
@param product_uos: Unit of sale of product
@return: Dictionary of values
"""
result = {
'product_uos_qty': 0.00
}
if (not product_id) or (product_qty <=0.0):
return {'value': result}
product_obj = self.pool.get('product.product')
uos_coeff = product_obj.read(cr, uid, product_id, ['uos_coeff'])
if product_uos and product_uom and (product_uom != product_uos):
result['product_uos_qty'] = product_qty * uos_coeff['uos_coeff']
else:
result['product_uos_qty'] = product_qty
return {'value': result}
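    # Worked example (hypothetical values, not part of the original module):
    # for a product stocked in boxes of 12 but sold per piece (uos_coeff = 12.0),
    # onchange_quantity(cr, uid, ids, product_id, 2.0, box_uom_id, piece_uos_id)
    # returns {'value': {'product_uos_qty': 24.0}}.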
def onchange_uos_quantity(self, cr, uid, ids, product_id, product_uos_qty,
product_uos, product_uom):
""" On change of product quantity finds UoM and UoS quantities
@param product_id: Product id
@param product_uos_qty: Changed UoS Quantity of product
@param product_uom: Unit of measure of product
@param product_uos: Unit of sale of product
@return: Dictionary of values
"""
result = {
'product_qty': 0.00
}
if (not product_id) or (product_uos_qty <=0.0):
return {'value': result}
product_obj = self.pool.get('product.product')
uos_coeff = product_obj.read(cr, uid, product_id, ['uos_coeff'])
if product_uos and product_uom and (product_uom != product_uos):
result['product_qty'] = product_uos_qty / uos_coeff['uos_coeff']
else:
result['product_qty'] = product_uos_qty
return {'value': result}
def onchange_product_id(self, cr, uid, ids, prod_id=False, loc_id=False,
loc_dest_id=False, address_id=False):
""" On change of product id, if finds UoM, UoS, quantity and UoS quantity.
@param prod_id: Changed Product id
@param loc_id: Source location id
@param loc_dest_id: Destination location id
@param address_id: Address id of partner
@return: Dictionary of values
"""
if not prod_id:
return {}
lang = False
if address_id:
addr_rec = self.pool.get('res.partner.address').browse(cr, uid, address_id)
if addr_rec:
lang = addr_rec.partner_id and addr_rec.partner_id.lang or False
ctx = {'lang': lang}
product = self.pool.get('product.product').browse(cr, uid, [prod_id], context=ctx)[0]
uos_id = product.uos_id and product.uos_id.id or False
result = {
'product_uom': product.uom_id.id,
'product_uos': uos_id,
'product_qty': 1.00,
'product_uos_qty' : self.pool.get('stock.move').onchange_quantity(cr, uid, ids, prod_id, 1.00, product.uom_id.id, uos_id)['value']['product_uos_qty']
}
if not ids:
result['name'] = product.partner_ref
if loc_id:
result['location_id'] = loc_id
if loc_dest_id:
result['location_dest_id'] = loc_dest_id
return {'value': result}
def onchange_date(self, cr, uid, ids, date, date_expected, context=None):
""" On change of Scheduled Date gives a Move date.
@param date_expected: Scheduled Date
@param date: Move Date
@return: Move Date
"""
if not date_expected:
date_expected = time.strftime('%Y-%m-%d %H:%M:%S')
return {'value':{'date': date_expected}}
def _chain_compute(self, cr, uid, moves, context=None):
""" Finds whether the location has chained location type or not.
@param moves: Stock moves
@return: Dictionary containing destination location with chained location type.
"""
result = {}
for m in moves:
dest = self.pool.get('stock.location').chained_location_get(
cr,
uid,
m.location_dest_id,
m.picking_id and m.picking_id.address_id and m.picking_id.address_id.partner_id,
m.product_id,
context
)
if dest:
if dest[1] == 'transparent':
newdate = (datetime.strptime(m.date, '%Y-%m-%d %H:%M:%S') + relativedelta(days=dest[2] or 0)).strftime('%Y-%m-%d')
self.write(cr, uid, [m.id], {
'date': newdate,
'location_dest_id': dest[0].id})
if m.picking_id and (dest[3] or dest[5]):
self.pool.get('stock.picking').write(cr, uid, [m.picking_id.id], {
'stock_journal_id': dest[3] or m.picking_id.stock_journal_id.id,
'type': dest[5] or m.picking_id.type
}, context=context)
m.location_dest_id = dest[0]
res2 = self._chain_compute(cr, uid, [m], context=context)
for pick_id in res2.keys():
result.setdefault(pick_id, [])
result[pick_id] += res2[pick_id]
else:
result.setdefault(m.picking_id, [])
result[m.picking_id].append( (m, dest) )
return result
def _prepare_chained_picking(self, cr, uid, picking_name, picking, picking_type, moves_todo, context=None):
"""Prepare the definition (values) to create a new chained picking.
:param str picking_name: desired new picking name
:param browse_record picking: source picking (being chained to)
:param str picking_type: desired new picking type
:param list moves_todo: specification of the stock moves to be later included in this
picking, in the form::
[[move, (dest_location, auto_packing, chained_delay, chained_journal,
chained_company_id, chained_picking_type)],
...
]
See also :meth:`stock_location.chained_location_get`.
"""
res_company = self.pool.get('res.company')
return {
'name': picking_name,
'origin': tools.ustr(picking.origin or ''),
'type': picking_type,
'note': picking.note,
'move_type': picking.move_type,
'auto_picking': moves_todo[0][1][1] == 'auto',
'stock_journal_id': moves_todo[0][1][3],
'company_id': moves_todo[0][1][4] or res_company._company_default_get(cr, uid, 'stock.company', context=context),
'address_id': picking.address_id.id,
'invoice_state': 'none',
'date': picking.date,
}
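    # Shape of moves_todo as consumed above (illustrative, hypothetical values):
    #   [[move, (dest_location, 'auto', 2, journal_id, company_id, 'internal')], ...]
    # i.e. index 1 of the inner tuple is auto_packing, index 2 the chained delay
    # in days, index 3 the stock journal, index 4 the company and index 5 the
    # picking type, matching stock_location.chained_location_get().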
def _create_chained_picking(self, cr, uid, picking_name, picking, picking_type, moves_todo, context=None):
picking_obj = self.pool.get('stock.picking')
return picking_obj.create(cr, uid, self._prepare_chained_picking(cr, uid, picking_name, picking, picking_type, moves_todo, context=context))
def create_chained_picking(self, cr, uid, moves, context=None):
res_obj = self.pool.get('res.company')
location_obj = self.pool.get('stock.location')
move_obj = self.pool.get('stock.move')
wf_service = netsvc.LocalService("workflow")
new_moves = []
if context is None:
context = {}
seq_obj = self.pool.get('ir.sequence')
for picking, todo in self._chain_compute(cr, uid, moves, context=context).items():
ptype = todo[0][1][5] and todo[0][1][5] or location_obj.picking_type_get(cr, uid, todo[0][0].location_dest_id, todo[0][1][0])
if picking:
# name of new picking according to its type
new_pick_name = seq_obj.get(cr, uid, 'stock.picking.' + ptype)
pickid = self._create_chained_picking(cr, uid, new_pick_name, picking, ptype, todo, context=context)
# Need to check name of old picking because it always considers picking as "OUT" when created from Sale Order
old_ptype = location_obj.picking_type_get(cr, uid, picking.move_lines[0].location_id, picking.move_lines[0].location_dest_id)
if old_ptype != picking.type:
old_pick_name = seq_obj.get(cr, uid, 'stock.picking.' + old_ptype)
self.pool.get('stock.picking').write(cr, uid, [picking.id], {'name': old_pick_name}, context=context)
else:
pickid = False
for move, (loc, dummy, delay, dummy, company_id, ptype) in todo:
new_id = move_obj.copy(cr, uid, move.id, {
'location_id': move.location_dest_id.id,
'location_dest_id': loc.id,
'date_moved': time.strftime('%Y-%m-%d'),
'picking_id': pickid,
'state': 'waiting',
'company_id': company_id or res_obj._company_default_get(cr, uid, 'stock.company', context=context) ,
'move_history_ids': [],
'date': (datetime.strptime(move.date, '%Y-%m-%d %H:%M:%S') + relativedelta(days=delay or 0)).strftime('%Y-%m-%d'),
'move_history_ids2': []}
)
move_obj.write(cr, uid, [move.id], {
'move_dest_id': new_id,
'move_history_ids': [(4, new_id)]
})
new_moves.append(self.browse(cr, uid, [new_id])[0])
if pickid:
wf_service.trg_validate(uid, 'stock.picking', pickid, 'button_confirm', cr)
if new_moves:
new_moves += self.create_chained_picking(cr, uid, new_moves, context)
return new_moves
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms stock move.
@return: List of ids.
"""
moves = self.browse(cr, uid, ids, context=context)
self.write(cr, uid, ids, {'state': 'confirmed'})
self.create_chained_picking(cr, uid, moves, context)
return []
def action_assign(self, cr, uid, ids, *args):
""" Changes state to confirmed or waiting.
@return: List of values
"""
todo = []
for move in self.browse(cr, uid, ids):
if move.state in ('confirmed', 'waiting'):
todo.append(move.id)
res = self.check_assign(cr, uid, todo)
return res
def force_assign(self, cr, uid, ids, context=None):
""" Changes the state to assigned.
@return: True
"""
self.write(cr, uid, ids, {'state': 'assigned'})
return True
def cancel_assign(self, cr, uid, ids, context=None):
""" Changes the state to confirmed.
@return: True
"""
self.write(cr, uid, ids, {'state': 'confirmed'})
# fix for bug lp:707031
# called write of related picking because changing move availability does
# not trigger workflow of picking in order to change the state of picking
wf_service = netsvc.LocalService('workflow')
for move in self.browse(cr, uid, ids, context):
if move.picking_id:
wf_service.trg_write(uid, 'stock.picking', move.picking_id.id, cr)
return True
#
# Duplicate stock.move
#
def check_assign(self, cr, uid, ids, context=None):
""" Checks the product type and accordingly writes the state.
@return: No. of moves done
"""
done = []
count = 0
pickings = {}
if context is None:
context = {}
for move in self.browse(cr, uid, ids, context=context):
if move.product_id.type == 'consu' or move.location_id.usage == 'supplier':
if move.state in ('confirmed', 'waiting'):
done.append(move.id)
pickings[move.picking_id.id] = 1
continue
if move.state in ('confirmed', 'waiting'):
# Important: we must pass lock=True to _product_reserve() to avoid race conditions and double reservations
res = self.pool.get('stock.location')._product_reserve(cr, uid, [move.location_id.id], move.product_id.id, move.product_qty, {'uom': move.product_uom.id}, lock=True)
if res:
#_product_available_test depends on the next status for correct functioning
#the test does not work correctly if the same product occurs multiple times
#in the same order. This is e.g. the case when using the button 'split in two' of
#the stock outgoing form
self.write(cr, uid, [move.id], {'state':'assigned'})
done.append(move.id)
pickings[move.picking_id.id] = 1
r = res.pop(0)
cr.execute('update stock_move set location_id=%s, product_qty=%s where id=%s', (r[1], r[0], move.id))
while res:
r = res.pop(0)
move_id = self.copy(cr, uid, move.id, {'product_qty': r[0], 'location_id': r[1]})
done.append(move_id)
if done:
count += len(done)
self.write(cr, uid, done, {'state': 'assigned'})
if count:
for pick_id in pickings:
wf_service = netsvc.LocalService("workflow")
wf_service.trg_write(uid, 'stock.picking', pick_id, cr)
return count
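    # Note on the reservation loop above (descriptive comment, not part of the
    # original module): _product_reserve() may return several (qty, location)
    # pairs; the first pair is written onto the current move via raw SQL and
    # every remaining pair becomes a copied move, so a single move can be split
    # across several source locations.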
def setlast_tracking(self, cr, uid, ids, context=None):
tracking_obj = self.pool.get('stock.tracking')
picking = self.browse(cr, uid, ids, context=context)[0].picking_id
if picking:
last_track = [line.tracking_id.id for line in picking.move_lines if line.tracking_id]
if not last_track:
last_track = tracking_obj.create(cr, uid, {}, context=context)
else:
last_track.sort()
last_track = last_track[-1]
self.write(cr, uid, ids, {'tracking_id': last_track})
return True
#
# Cancel move => cancel others move and pickings
#
def action_cancel(self, cr, uid, ids, context=None):
""" Cancels the moves and if all moves are cancelled it cancels the picking.
@return: True
"""
if not len(ids):
return True
if context is None:
context = {}
pickings = {}
for move in self.browse(cr, uid, ids, context=context):
if move.state in ('confirmed', 'waiting', 'assigned', 'draft'):
if move.picking_id:
pickings[move.picking_id.id] = True
if move.move_dest_id and move.move_dest_id.state == 'waiting':
self.write(cr, uid, [move.move_dest_id.id], {'state': 'assigned'})
if context.get('call_unlink',False) and move.move_dest_id.picking_id:
wf_service = netsvc.LocalService("workflow")
wf_service.trg_write(uid, 'stock.picking', move.move_dest_id.picking_id.id, cr)
self.write(cr, uid, ids, {'state': 'cancel', 'move_dest_id': False})
if not context.get('call_unlink',False):
for pick in self.pool.get('stock.picking').browse(cr, uid, pickings.keys()):
if all(move.state == 'cancel' for move in pick.move_lines):
self.pool.get('stock.picking').write(cr, uid, [pick.id], {'state': 'cancel'})
wf_service = netsvc.LocalService("workflow")
for id in ids:
wf_service.trg_trigger(uid, 'stock.move', id, cr)
return True
def _get_accounting_data_for_valuation(self, cr, uid, move, context=None):
"""
Return the accounts and journal to use to post Journal Entries for the real-time
valuation of the move.
:param context: context dictionary that can explicitly mention the company to consider via the 'force_company' key
        :raise: osv.except_osv() if any mandatory account or journal is not defined.
"""
product_obj=self.pool.get('product.product')
accounts = product_obj.get_product_accounts(cr, uid, move.product_id.id, context)
if move.location_id.valuation_out_account_id:
acc_src = move.location_id.valuation_out_account_id.id
else:
acc_src = accounts['stock_account_input']
if move.location_dest_id.valuation_in_account_id:
acc_dest = move.location_dest_id.valuation_in_account_id.id
else:
acc_dest = accounts['stock_account_output']
acc_valuation = accounts.get('property_stock_valuation_account_id', False)
journal_id = accounts['stock_journal']
if acc_dest == acc_valuation:
raise osv.except_osv(_('Error!'), _('Can not create Journal Entry, Output Account defined on this product and Valuation account on category of this product are same.'))
        if acc_src == acc_valuation:
            raise osv.except_osv(_('Error!'), _('Can not create Journal Entry, Input Account defined on this product and Valuation account on category of this product are same.'))
|
"""
This is only meant to add docs to objects defined in C-extension modules.
The purpose is to allow easier editing of the docstrings without
requiring a re-compile.
NOTE: Many of the methods of ndarray have corresponding functions.
If you update these docstrings, please keep also the ones in
core/fromnumeric.py, core/defmatrix.py up-to-date.
"""
from __future__ import division, absolute_import, print_function
from numpy.lib import add_newdoc
###############################################################################
#
# flatiter
#
# flatiter needs a toplevel description
#
###############################################################################
add_newdoc('numpy.core', 'flatiter',
"""
Flat iterator object to iterate over arrays.
A `flatiter` iterator is returned by ``x.flat`` for any array `x`.
It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
See Also
--------
ndarray.flat : Return a flat iterator over an array.
ndarray.flatten : Returns a flattened copy of an array.
Notes
-----
A `flatiter` iterator can not be constructed directly from Python code
by calling the `flatiter` constructor.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> type(fl)
<type 'numpy.flatiter'>
>>> for item in fl:
... print item
...
0
1
2
3
4
5
>>> fl[2:4]
array([2, 3])
""")
# flatiter attributes
add_newdoc('numpy.core', 'flatiter', ('base',
"""
A reference to the array that is iterated over.
Examples
--------
>>> x = np.arange(5)
>>> fl = x.flat
>>> fl.base is x
True
"""))
add_newdoc('numpy.core', 'flatiter', ('coords',
"""
An N-dimensional tuple of current coordinates.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.coords
(0, 0)
>>> fl.next()
0
>>> fl.coords
(0, 1)
"""))
add_newdoc('numpy.core', 'flatiter', ('index',
"""
Current flat index into the array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.index
0
>>> fl.next()
0
>>> fl.index
1
"""))
# flatiter functions
add_newdoc('numpy.core', 'flatiter', ('__array__',
"""__array__(type=None) Get array from iterator
"""))
add_newdoc('numpy.core', 'flatiter', ('copy',
"""
copy()
Get a copy of the iterator as a 1-D array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> fl = x.flat
>>> fl.copy()
array([0, 1, 2, 3, 4, 5])
"""))
###############################################################################
#
# nditer
#
###############################################################################
add_newdoc('numpy.core', 'nditer',
"""
Efficient multi-dimensional iterator object to iterate over arrays.
To get started using this object, see the
:ref:`introductory guide to array iteration <arrays.nditer>`.
Parameters
----------
op : ndarray or sequence of array_like
The array(s) to iterate over.
flags : sequence of str, optional
Flags to control the behavior of the iterator.
* "buffered" enables buffering when required.
* "c_index" causes a C-order index to be tracked.
* "f_index" causes a Fortran-order index to be tracked.
* "multi_index" causes a multi-index, or a tuple of indices
with one per iteration dimension, to be tracked.
* "common_dtype" causes all the operands to be converted to
a common data type, with copying or buffering as necessary.
* "delay_bufalloc" delays allocation of the buffers until
a reset() call is made. Allows "allocate" operands to
be initialized before their values are copied into the buffers.
* "external_loop" causes the `values` given to be
one-dimensional arrays with multiple values instead of
zero-dimensional arrays.
* "grow_inner" allows the `value` array sizes to be made
larger than the buffer size when both "buffered" and
"external_loop" is used.
* "ranged" allows the iterator to be restricted to a sub-range
of the iterindex values.
* "refs_ok" enables iteration of reference types, such as
object arrays.
* "reduce_ok" enables iteration of "readwrite" operands
which are broadcasted, also known as reduction operands.
* "zerosize_ok" allows `itersize` to be zero.
op_flags : list of list of str, optional
This is a list of flags for each operand. At minimum, one of
"readonly", "readwrite", or "writeonly" must be specified.
* "readonly" indicates the operand will only be read from.
* "readwrite" indicates the operand will be read from and written to.
* "writeonly" indicates the operand will only be written to.
* "no_broadcast" prevents the operand from being broadcasted.
* "contig" forces the operand data to be contiguous.
* "aligned" forces the operand data to be aligned.
* "nbo" forces the operand data to be in native byte order.
* "copy" allows a temporary read-only copy if required.
* "updateifcopy" allows a temporary read-write copy if required.
* "allocate" causes the array to be allocated if it is None
in the `op` parameter.
* "no_subtype" prevents an "allocate" operand from using a subtype.
* "arraymask" indicates that this operand is the mask to use
for selecting elements when writing to operands with the
'writemasked' flag set. The iterator does not enforce this,
but when writing from a buffer back to the array, it only
copies those elements indicated by this mask.
* 'writemasked' indicates that only elements where the chosen
'arraymask' operand is True will be written to.
op_dtypes : dtype or tuple of dtype(s), optional
The required data type(s) of the operands. If copying or buffering
is enabled, the data will be converted to/from their original types.
order : {'C', 'F', 'A', 'K'}, optional
Controls the iteration order. 'C' means C order, 'F' means
Fortran order, 'A' means 'F' order if all the arrays are Fortran
contiguous, 'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible. This also
affects the element memory order of "allocate" operands, as they
are allocated to be compatible with iteration order.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when making a copy
or buffering. Setting this to 'unsafe' is not recommended,
as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
op_axes : list of list of ints, optional
        If provided, is a list of ints or None for each operand.
The list of axes for an operand is a mapping from the dimensions
of the iterator to the dimensions of the operand. A value of
-1 can be placed for entries, causing that dimension to be
treated as "newaxis".
itershape : tuple of ints, optional
The desired shape of the iterator. This allows "allocate" operands
with a dimension mapped by op_axes not corresponding to a dimension
of a different operand to get a value not equal to 1 for that
dimension.
buffersize : int, optional
When buffering is enabled, controls the size of the temporary
buffers. Set to 0 for the default value.
Attributes
----------
dtypes : tuple of dtype(s)
The data types of the values provided in `value`. This may be
different from the operand data types if buffering is enabled.
finished : bool
Whether the iteration over the operands is finished or not.
has_delayed_bufalloc : bool
If True, the iterator was created with the "delay_bufalloc" flag,
and no reset() function was called on it yet.
has_index : bool
If True, the iterator was created with either the "c_index" or
the "f_index" flag, and the property `index` can be used to
retrieve it.
has_multi_index : bool
If True, the iterator was created with the "multi_index" flag,
and the property `multi_index` can be used to retrieve it.
index :
When the "c_index" or "f_index" flag was used, this property
provides access to the index. Raises a ValueError if accessed
and `has_index` is False.
iterationneedsapi : bool
Whether iteration requires access to the Python API, for example
if one of the operands is an object array.
iterindex : int
An index which matches the order of iteration.
itersize : int
Size of the iterator.
itviews :
Structured view(s) of `operands` in memory, matching the reordered
and optimized iterator access pattern.
multi_index :
When the "multi_index" flag was used, this property
        provides access to the index. Raises a ValueError if accessed
        and `has_multi_index` is False.
ndim : int
The iterator's dimension.
nop : int
The number of iterator operands.
operands : tuple of operand(s)
The array(s) to be iterated over.
shape : tuple of ints
Shape tuple, the shape of the iterator.
value :
Value of `operands` at current iteration. Normally, this is a
tuple of array scalars, but if the flag "external_loop" is used,
it is a tuple of one dimensional arrays.
Notes
-----
`nditer` supersedes `flatiter`. The iterator implementation behind
`nditer` is also exposed by the Numpy C API.
The Python exposure supplies two iteration interfaces, one which follows
the Python iterator protocol, and another which mirrors the C-style
do-while pattern. The native Python approach is better in most cases, but
if you need the iterator's coordinates or index, use the C-style pattern.
Examples
--------
Here is how we might write an ``iter_add`` function, using the
Python iterator protocol::
def iter_add_py(x, y, out=None):
addop = np.add
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
for (a, b, c) in it:
addop(a, b, out=c)
return it.operands[2]
Here is the same function, but following the C-style pattern::
def iter_add(x, y, out=None):
addop = np.add
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
while not it.finished:
addop(it[0], it[1], out=it[2])
it.iternext()
return it.operands[2]
Here is an example outer product function::
def outer_it(x, y, out=None):
mulop = np.multiply
it = np.nditer([x, y, out], ['external_loop'],
[['readonly'], ['readonly'], ['writeonly', 'allocate']],
op_axes=[range(x.ndim)+[-1]*y.ndim,
[-1]*x.ndim+range(y.ndim),
None])
for (a, b, c) in it:
mulop(a, b, out=c)
return it.operands[2]
>>> a = np.arange(2)+1
>>> b = np.arange(3)+1
>>> outer_it(a,b)
array([[1, 2, 3],
[2, 4, 6]])
Here is an example function which operates like a "lambda" ufunc::
def luf(lamdaexpr, *args, **kwargs):
"luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)"
nargs = len(args)
op = (kwargs.get('out',None),) + args
it = np.nditer(op, ['buffered','external_loop'],
[['writeonly','allocate','no_broadcast']] +
[['readonly','nbo','aligned']]*nargs,
order=kwargs.get('order','K'),
casting=kwargs.get('casting','safe'),
buffersize=kwargs.get('buffersize',0))
while not it.finished:
it[0] = lamdaexpr(*it[1:])
it.iternext()
return it.operands[0]
>>> a = np.arange(5)
>>> b = np.ones(5)
>>> luf(lambda i,j:i*i + j/2, a, b)
array([ 0.5, 1.5, 4.5, 9.5, 16.5])
""")
# nditer methods
add_newdoc('numpy.core', 'nditer', ('copy',
"""
copy()
Get a copy of the iterator in its current state.
Examples
--------
>>> x = np.arange(10)
>>> y = x + 1
>>> it = np.nditer([x, y])
>>> it.next()
(array(0), array(1))
>>> it2 = it.copy()
>>> it2.next()
(array(1), array(2))
"""))
add_newdoc('numpy.core', 'nditer', ('debug_print',
"""
debug_print()
Print the current state of the `nditer` instance and debug info to stdout.
"""))
add_newdoc('numpy.core', 'nditer', ('enable_external_loop',
"""
enable_external_loop()
When the "external_loop" was not used during construction, but
is desired, this modifies the iterator to behave as if the flag
was specified.
"""))
add_newdoc('numpy.core', 'nditer', ('iternext',
"""
iternext()
Check whether iterations are left, and perform a single internal iteration
    without returning the result. Used in the C-style do-while pattern.
    For an example, see `nditer`.
Returns
-------
iternext : bool
Whether or not there are iterations left.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_axis',
"""
remove_axis(i)
Removes axis `i` from the iterator. Requires that the flag "multi_index"
be enabled.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_multi_index',
"""
remove_multi_index()
When the "multi_index" flag was specified, this removes it, allowing
the internal iteration structure to be optimized further.
"""))
add_newdoc('numpy.core', 'nditer', ('reset',
"""
reset()
Reset the iterator to its initial state.
"""))
###############################################################################
#
# broadcast
#
###############################################################################
add_newdoc('numpy.core', 'broadcast',
"""
Produce an object that mimics broadcasting.
Parameters
----------
in1, in2, ... : array_like
Input parameters.
Returns
-------
b : broadcast object
Broadcast the input parameters against one another, and
return an object that encapsulates the result.
Amongst others, it has ``shape`` and ``nd`` properties, and
may be used as an iterator.
Examples
--------
Manually adding two vectors, using broadcasting:
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> out = np.empty(b.shape)
>>> out.flat = [u+v for (u,v) in b]
>>> out
array([[ 5., 6., 7.],
[ 6., 7., 8.],
[ 7., 8., 9.]])
Compare against built-in broadcasting:
>>> x + y
array([[5, 6, 7],
[6, 7, 8],
[7, 8, 9]])
""")
# attributes
add_newdoc('numpy.core', 'broadcast', ('index',
"""
current index in broadcasted result
Examples
--------
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> b.next(), b.next(), b.next()
((1, 4), (1, 5), (1, 6))
>>> b.index
3
"""))
add_newdoc('numpy.core', 'broadcast', ('iters',
"""
tuple of iterators along ``self``'s "components."
Returns a tuple of `numpy.flatiter` objects, one for each "component"
of ``self``.
See Also
--------
numpy.flatiter
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> row, col = b.iters
>>> row.next(), col.next()
(1, 4)
"""))
add_newdoc('numpy.core', 'broadcast', ('nd',
"""
Number of dimensions of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.nd
2
"""))
add_newdoc('numpy.core', 'broadcast', ('numiter',
"""
Number of iterators possessed by the broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.numiter
2
"""))
add_newdoc('numpy.core', 'broadcast', ('shape',
"""
Shape of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.shape
(3, 3)
"""))
add_newdoc('numpy.core', 'broadcast', ('size',
"""
Total size of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.size
9
"""))
add_newdoc('numpy.core', 'broadcast', ('reset',
"""
reset()
Reset the broadcasted result's iterator(s).
Parameters
----------
None
Returns
-------
None
Examples
--------
>>> x = np.array([1, 2, 3])
    >>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> b.next(), b.next(), b.next()
((1, 4), (2, 4), (3, 4))
>>> b.index
3
>>> b.reset()
>>> b.index
0
"""))
###############################################################################
#
# numpy functions
#
###############################################################################
add_newdoc('numpy.core.multiarray', 'array',
"""
array(object, dtype=None, copy=True, order=None, subok=False, ndmin=0)
Create an array.
Parameters
----------
object : array_like
An array, any object exposing the array interface, an
object whose __array__ method returns an array, or any
(nested) sequence.
dtype : data-type, optional
The desired data-type for the array. If not given, then
the type will be determined as the minimum type required
to hold the objects in the sequence. This argument can only
be used to 'upcast' the array. For downcasting, use the
.astype(t) method.
copy : bool, optional
If true (default), then the object is copied. Otherwise, a copy
will only be made if __array__ returns a copy, if obj is a
nested sequence, or if a copy is needed to satisfy any of the other
requirements (`dtype`, `order`, etc.).
order : {'C', 'F', 'A'}, optional
Specify the order of the array. If order is 'C' (default), then the
array will be in C-contiguous order (last-index varies the
fastest). If order is 'F', then the returned array
will be in Fortran-contiguous order (first-index varies the
fastest). If order is 'A', then the returned array may
be in any order (either C-, Fortran-contiguous, or even
discontiguous).
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting
array should have. Ones will be pre-pended to the shape as
needed to meet this requirement.
Returns
-------
out : ndarray
An array object satisfying the specified requirements.
See Also
--------
empty, empty_like, zeros, zeros_like, ones, ones_like, fill
Examples
--------
>>> np.array([1, 2, 3])
array([1, 2, 3])
Upcasting:
>>> np.array([1, 2, 3.0])
array([ 1., 2., 3.])
More than one dimension:
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
[3, 4]])
Minimum dimensions 2:
>>> np.array([1, 2, 3], ndmin=2)
array([[1, 2, 3]])
Type provided:
>>> np.array([1, 2, 3], dtype=complex)
array([ 1.+0.j, 2.+0.j, 3.+0.j])
Data-type consisting of more than one element:
>>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')])
>>> x['a']
array([1, 3])
Creating an array from sub-classes:
>>> np.array(np.mat('1 2; 3 4'))
array([[1, 2],
[3, 4]])
>>> np.array(np.mat('1 2; 3 4'), subok=True)
matrix([[1, 2],
[3, 4]])
""")
add_newdoc('numpy.core.multiarray', 'empty',
"""
empty(shape, dtype=float, order='C')
Return a new array of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
Shape of the empty array
dtype : data-type, optional
Desired output data-type.
order : {'C', 'F'}, optional
Whether to store multi-dimensional data in C (row-major) or
Fortran (column-major) order in memory.
See Also
--------
empty_like, zeros, ones
Notes
-----
`empty`, unlike `zeros`, does not set the array values to zero,
and may therefore be marginally faster. On the other hand, it requires
the user to manually set all the values in the array, and should be
used with caution.
Examples
--------
>>> np.empty([2, 2])
array([[ -9.74499359e+001, 6.69583040e-309],
[ 2.13182611e-314, 3.06959433e-309]]) #random
>>> np.empty([2, 2], dtype=int)
array([[-1073741821, -1067949133],
[ 496041986, 19249760]]) #random
""")
add_newdoc('numpy.core.multiarray', 'empty_like',
"""
empty_like(a, dtype=None, order='K', subok=True)
Return a new array with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of the
returned array.
dtype : data-type, optional
.. versionadded:: 1.6.0
Overrides the data type of the result.
order : {'C', 'F', 'A', or 'K'}, optional
.. versionadded:: 1.6.0
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of ``a`` as closely
as possible.
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to True.
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data with the same
shape and type as `a`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
Notes
-----
This function does *not* initialize the returned array; to do that use
`zeros_like` or `ones_like` instead. It may be marginally faster than
the functions that do set the array values.
Examples
--------
>>> a = ([1,2,3], [4,5,6]) # a is array-like
>>> np.empty_like(a)
array([[-1073741821, -1073741821, 3], #random
[ 0, 0, -1073741821]])
>>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
>>> np.empty_like(a)
array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000],#random
[ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
""")
add_newdoc('numpy.core.multiarray', 'scalar',
"""
scalar(dtype, obj)
Return a new scalar array of the given type initialized with obj.
This function is meant mainly for pickle support. `dtype` must be a
valid data-type descriptor. If `dtype` corresponds to an object
descriptor, then `obj` can be any object, otherwise `obj` must be a
string. If `obj` is not given, it will be interpreted as None for object
type and as zeros for all other types.
""")
add_newdoc('numpy.core.multiarray', 'zeros',
"""
zeros(shape, dtype=float, order='C')
Return a new array of given shape and type, filled with zeros.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
Returns
-------
out : ndarray
Array of zeros with the given shape, dtype, and order.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
Examples
--------
>>> np.zeros(5)
array([ 0., 0., 0., 0., 0.])
    >>> np.zeros((5,), dtype=np.int)
array([0, 0, 0, 0, 0])
>>> np.zeros((2, 1))
array([[ 0.],
[ 0.]])
>>> s = (2,2)
>>> np.zeros(s)
array([[ 0., 0.],
[ 0., 0.]])
>>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype
array([(0, 0), (0, 0)],
dtype=[('x', '<i4'), ('y', '<i4')])
""")
add_newdoc('numpy.core.multiarray', 'count_nonzero',
"""
count_nonzero(a)
Counts the number of non-zero values in the array ``a``.
Parameters
----------
a : array_like
The array for which to count non-zeros.
Returns
-------
count : int or array of int
Number of non-zero values in the array.
See Also
--------
nonzero : Return the coordinates of all the non-zero values.
Examples
--------
>>> np.count_nonzero(np.eye(4))
4
>>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]])
5
""")
add_newdoc('numpy.core.multiarray', 'set_typeDict',
"""set_typeDict(dict)
Set the internal dictionary that can look up an array type using a
registered code.
""")
add_newdoc('numpy.core.multiarray', 'fromstring',
"""
fromstring(string, dtype=float, count=-1, sep='')
A new 1-D array initialized from raw binary or text data in a string.
Parameters
----------
string : str
A string containing the data.
dtype : data-type, optional
The data type of the array; default: float. For binary input data,
the data must be in exactly this format.
count : int, optional
Read this number of `dtype` elements from the data. If this is
negative (the default), the count will be determined from the
length of the data.
sep : str, optional
If not provided or, equivalently, the empty string, the data will
be interpreted as binary data; otherwise, as ASCII text with
decimal numbers. Also in this latter case, this argument is
interpreted as the string separating numbers in the data; extra
whitespace between elements is also ignored.
Returns
-------
arr : ndarray
The constructed array.
Raises
------
ValueError
If the string is not the correct size to satisfy the requested
`dtype` and `count`.
See Also
--------
frombuffer, fromfile, fromiter
Examples
--------
>>> np.fromstring('\\x01\\x02', dtype=np.uint8)
array([1, 2], dtype=uint8)
>>> np.fromstring('1 2', dtype=int, sep=' ')
array([1, 2])
>>> np.fromstring('1, 2', dtype=int, sep=',')
array([1, 2])
>>> np.fromstring('\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
array([1, 2, 3], dtype=uint8)
""")
add_newdoc('numpy.core.multiarray', 'fromiter',
"""
fromiter(iterable, dtype, count=-1)
Create a new 1-dimensional array from an iterable object.
Parameters
----------
iterable : iterable object
An iterable object providing data for the array.
dtype : data-type
The data-type of the returned array.
count : int, optional
The number of items to read from *iterable*. The default is -1,
which means all data is read.
Returns
-------
out : ndarray
The output array.
Notes
-----
Specify `count` to improve performance. It allows ``fromiter`` to
pre-allocate the output array, instead of resizing it on demand.
Examples
--------
>>> iterable = (x*x for x in range(5))
>>> np.fromiter(iterable, np.float)
array([ 0., 1., 4., 9., 16.])
""")
add_newdoc('numpy.core.multiarray', 'fromfile',
"""
fromfile(file, dtype=float, count=-1, sep='')
Construct an array from data in a text or binary file.
A highly efficient way of reading binary data with a known data-type,
as well as parsing simply formatted text files. Data written using the
`tofile` method can be read using this function.
Parameters
----------
file : file or str
Open file object or filename.
dtype : data-type
Data type of the returned array.
For binary files, it is used to determine the size and byte-order
of the items in the file.
count : int
Number of items to read. ``-1`` means all items (i.e., the complete
file).
sep : str
Separator between items if file is a text file.
Empty ("") separator means the file should be treated as binary.
Spaces (" ") in the separator match zero or more whitespace characters.
A separator consisting only of spaces must match at least one
whitespace.
See also
--------
load, save
ndarray.tofile
loadtxt : More flexible way of loading data from a text file.
Notes
-----
Do not rely on the combination of `tofile` and `fromfile` for
    data storage, as the binary files generated are not platform
independent. In particular, no byte-order or data-type information is
saved. Data can be stored in the platform independent ``.npy`` format
using `save` and `load` instead.
Examples
--------
Construct an ndarray:
>>> dt = np.dtype([('time', [('min', int), ('sec', int)]),
... ('temp', float)])
>>> x = np.zeros((1,), dtype=dt)
>>> x['time']['min'] = 10; x['temp'] = 98.25
>>> x
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
Save the raw data to disk:
>>> import os
>>> fname = os.tmpnam()
>>> x.tofile(fname)
Read the raw data from disk:
>>> np.fromfile(fname, dtype=dt)
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
The recommended way to store and load data:
>>> np.save(fname, x)
>>> np.load(fname + '.npy')
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
""")
add_newdoc('numpy.core.multiarray', 'frombuffer',
"""
frombuffer(buffer, dtype=float, count=-1, offset=0)
Interpret a buffer as a 1-dimensional array.
Parameters
----------
buffer : buffer_like
An object that exposes the buffer interface.
dtype : data-type, optional
Data-type of the returned array; default: float.
count : int, optional
Number of items to read. ``-1`` means all data in the buffer.
offset : int, optional
Start reading the buffer from this offset; default: 0.
Notes
-----
If the buffer has data that is not in machine byte-order, this should
be specified as part of the data-type, e.g.::
>>> dt = np.dtype(int)
>>> dt = dt.newbyteorder('>')
>>> np.frombuffer(buf, dtype=dt)
The data of the resulting array will not be byteswapped, but will be
interpreted correctly.
Examples
--------
>>> s = 'hello world'
>>> np.frombuffer(s, dtype='S1', count=5, offset=6)
array(['w', 'o', 'r', 'l', 'd'],
dtype='|S1')
""")
add_newdoc('numpy.core.multiarray', 'concatenate',
"""
concatenate((a1, a2, ...), axis=0)
Join a sequence of arrays together.
Parameters
----------
a1, a2, ... : sequence of array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. Default is 0.
Returns
-------
res : ndarray
The concatenated array.
See Also
--------
ma.concatenate : Concatenate function that preserves input masks.
array_split : Split an array into multiple sub-arrays of equal or
near-equal size.
split : Split array into a list of multiple sub-arrays of equal size.
hsplit : Split array into multiple sub-arrays horizontally (column wise)
vsplit : Split array into multiple sub-arrays vertically (row wise)
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
hstack : Stack arrays in sequence horizontally (column wise)
vstack : Stack arrays in sequence vertically (row wise)
dstack : Stack arrays in sequence depth wise (along third dimension)
Notes
-----
When one or more of the arrays to be concatenated is a MaskedArray,
this function will return a MaskedArray object instead of an ndarray,
but the input masks are *not* preserved. In cases where a MaskedArray
is expected as input, use the ma.concatenate function from the masked
array module instead.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> b = np.array([[5, 6]])
>>> np.concatenate((a, b), axis=0)
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.concatenate((a, b.T), axis=1)
array([[1, 2, 5],
[3, 4, 6]])
This function will not preserve masking of MaskedArray inputs.
>>> a = np.ma.arange(3)
>>> a[1] = np.ma.masked
>>> b = np.arange(2, 5)
>>> a
masked_array(data = [0 -- 2],
mask = [False True False],
fill_value = 999999)
>>> b
array([2, 3, 4])
>>> np.concatenate([a, b])
masked_array(data = [0 1 2 2 3 4],
mask = False,
fill_value = 999999)
>>> np.ma.concatenate([a, b])
masked_array(data = [0 -- 2 2 3 4],
mask = [False True False False False False],
fill_value = 999999)
""")
add_newdoc('numpy.core', 'inner',
"""
inner(a, b)
Inner product of two arrays.
Ordinary inner product of vectors for 1-D arrays (without complex
conjugation), in higher dimensions a sum product over the last axes.
Parameters
----------
a, b : array_like
        If `a` and `b` are nonscalar, their last dimensions must match.
Returns
-------
out : ndarray
`out.shape = a.shape[:-1] + b.shape[:-1]`
Raises
------
ValueError
If the last dimension of `a` and `b` has different size.
See Also
--------
tensordot : Sum products over arbitrary axes.
dot : Generalised matrix product, using second last dimension of `b`.
einsum : Einstein summation convention.
Notes
-----
For vectors (1-D arrays) it computes the ordinary inner-product::
np.inner(a, b) = sum(a[:]*b[:])
More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
or explicitly::
np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
= sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
In addition `a` or `b` may be scalars, in which case::
np.inner(a,b) = a*b
Examples
--------
Ordinary inner product for vectors:
>>> a = np.array([1,2,3])
>>> b = np.array([0,1,0])
>>> np.inner(a, b)
2
A multidimensional example:
>>> a = np.arange(24).reshape((2,3,4))
>>> b = np.arange(4)
>>> np.inner(a, b)
array([[ 14, 38, 62],
[ 86, 110, 134]])
An example where `b` is a scalar:
>>> np.inner(np.eye(2), 7)
array([[ 7., 0.],
[ 0., 7.]])
""")
add_newdoc('numpy.core', 'fastCopyAndTranspose',
"""_fastCopyAndTranspose(a)""")
add_newdoc('numpy.core.multiarray', 'correlate',
"""cross_correlate(a,v, mode=0)""")
add_newdoc('numpy.core.multiarray', 'arange',
"""
arange([start,] stop[, step,], dtype=None)
Return evenly spaced values within a given interval.
Values are generated within the half-open interval ``[start, stop)``
(in other words, the interval including `start` but excluding `stop`).
For integer arguments the function is equivalent to the Python built-in
`range <http://docs.python.org/lib/built-in-funcs.html>`_ function,
but returns an ndarray rather than a list.
When using a non-integer step, such as 0.1, the results will often not
be consistent. It is better to use ``linspace`` for these cases.
Parameters
----------
start : number, optional
Start of interval. The interval includes this value. The default
start value is 0.
stop : number
End of interval. The interval does not include this value, except
in some cases where `step` is not an integer and floating point
round-off affects the length of `out`.
step : number, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
step size is 1. If `step` is specified, `start` must also be given.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
arange : ndarray
Array of evenly spaced values.
For floating point arguments, the length of the result is
``ceil((stop - start)/step)``. Because of floating point overflow,
this rule may result in the last element of `out` being greater
than `stop`.
See Also
--------
linspace : Evenly spaced numbers with careful handling of endpoints.
ogrid: Arrays of evenly spaced numbers in N-dimensions.
mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions.
Examples
--------
>>> np.arange(3)
array([0, 1, 2])
>>> np.arange(3.0)
array([ 0., 1., 2.])
>>> np.arange(3,7)
array([3, 4, 5, 6])
>>> np.arange(3,7,2)
array([3, 5])
""")
add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version',
"""_get_ndarray_c_version()
Return the compile time NDARRAY_VERSION number.
""")
add_newdoc('numpy.core.multiarray', '_reconstruct',
"""_reconstruct(subtype, shape, dtype)
Construct an empty array. Used by Pickles.
""")
add_newdoc('numpy.core.multiarray', 'set_string_function',
"""
set_string_function(f, repr=1)
Internal method to set a function to be used when pretty printing arrays.
""")
add_newdoc('numpy.core.multiarray', 'set_numeric_ops',
"""
set_numeric_ops(op1=func1, op2=func2, ...)
Set numerical operators for array objects.
Parameters
----------
op1, op2, ... : callable
Each ``op = func`` pair describes an operator to be replaced.
For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace
addition by modulus 5 addition.
Returns
-------
saved_ops : list of callables
A list of all operators, stored before making replacements.
Notes
-----
.. WARNING::
Use with care! Incorrect usage may lead to memory errors.
A function replacing an operator cannot make use of that operator.
For example, when replacing add, you may not use ``+``. Instead,
directly call ufuncs.
Examples
--------
>>> def add_mod5(x, y):
... return np.add(x, y) % 5
...
>>> old_funcs = np.set_numeric_ops(add=add_mod5)
>>> x = np.arange(12).reshape((3, 4))
>>> x + x
array([[0, 2, 4, 1],
[3, 0, 2, 4],
[1, 3, 0, 2]])
>>> ignore = np.set_numeric_ops(**old_funcs) # restore operators
""")
add_newdoc('numpy.core.multiarray', 'where',
"""
where(condition, [x, y])
Return elements, either from `x` or `y`, depending on `condition`.
If only `condition` is given, return ``condition.nonzero()``.
Parameters
----------
condition : array_like, bool
When True, yield `x`, otherwise yield `y`.
x, y : array_like, optional
Values from which to choose. `x` and `y` need to have the same
shape as `condition`.
Returns
-------
out : ndarray or tuple of ndarrays
If both `x` and `y` are specified, the output array contains
elements of `x` where `condition` is True, and elements from
`y` elsewhere.
If only `condition` is given, return the tuple
``condition.nonzero()``, the indices where `condition` is True.
See Also
--------
nonzero, choose
Notes
-----
If `x` and `y` are given and input arrays are 1-D, `where` is
equivalent to::
[xv if c else yv for (c,xv,yv) in zip(condition,x,y)]
Examples
--------
>>> np.where([[True, False], [True, True]],
... [[1, 2], [3, 4]],
... [[9, 8], [7, 6]])
array([[1, 8],
[3, 4]])
>>> np.where([[0, 1], [1, 0]])
(array([0, 1]), array([1, 0]))
>>> x = np.arange(9.).reshape(3, 3)
>>> np.where( x > 5 )
(array([2, 2, 2]), array([0, 1, 2]))
>>> x[np.where( x > 3.0 )] # Note: result is 1D.
array([ 4., 5., 6., 7., 8.])
>>> np.where(x < 5, x, -1) # Note: broadcasting.
array([[ 0., 1., 2.],
[ 3., 4., -1.],
[-1., -1., -1.]])
Find the indices of elements of `x` that are in `goodvalues`.
>>> goodvalues = [3, 4, 7]
>>> ix = np.in1d(x.ravel(), goodvalues).reshape(x.shape)
>>> ix
array([[False, False, False],
[ True, True, False],
[False, True, False]], dtype=bool)
>>> np.where(ix)
(array([1, 1, 2]), array([0, 1, 1]))
""")
add_newdoc('numpy.core.multiarray', 'lexsort',
"""
lexsort(keys, axis=-1)
Perform an indirect sort using a sequence of keys.
Given multiple sorting keys, which can be interpreted as columns in a
spreadsheet, lexsort returns an array of integer indices that describes
the sort order by multiple columns. The last key in the sequence is used
for the primary sort order, the second-to-last key for the secondary sort
order, and so on. The keys argument must be a sequence of objects that
can be converted to arrays of the same shape. If a 2D array is provided
    for the keys argument, its rows are interpreted as the sorting keys and
sorting is according to the last row, second last row etc.
Parameters
----------
keys : (k, N) array or tuple containing k (N,)-shaped sequences
The `k` different "columns" to be sorted. The last column (or row if
`keys` is a 2D array) is the primary sort key.
axis : int, optional
Axis to be indirectly sorted. By default, sort over the last axis.
Returns
-------
indices : (N,) ndarray of ints
Array of indices that sort the keys along the specified axis.
See Also
--------
argsort : Indirect sort.
ndarray.sort : In-place sort.
sort : Return a sorted copy of an array.
Examples
--------
Sort names: first by surname, then by name.
>>> surnames = ('Hertz', 'Galilei', 'Hertz')
>>> first_names = ('Heinrich', 'Galileo', 'Gustav')
>>> ind = np.lexsort((first_names, surnames))
>>> ind
array([1, 2, 0])
>>> [surnames[i] + ", " + first_names[i] for i in ind]
['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
Sort two columns of numbers:
>>> a = [1,5,1,4,3,4,4] # First column
>>> b = [9,4,0,4,0,2,1] # Second column
>>> ind = np.lexsort((b,a)) # Sort by a, then by b
>>> print ind
[2 0 4 6 5 3 1]
>>> [(a[i],b[i]) for i in ind]
[(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
Note that sorting is first according to the elements of ``a``.
Secondary sorting is according to the elements of ``b``.
A normal ``argsort`` would have yielded:
>>> [(a[i],b[i]) for i in np.argsort(a)]
[(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
Structured arrays are sorted lexically by ``argsort``:
>>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
... dtype=np.dtype([('x', int), ('y', int)]))
>>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
array([2, 0, 4, 6, 5, 3, 1])
""")
add_newdoc('numpy.core.multiarray', 'can_cast',
"""
can_cast(from, totype, casting='safe')
Returns True if cast between data types can occur according to the
casting rule. If from is a scalar or array scalar, also returns
True if the scalar value can be cast without overflow or truncation
to the data type `totype`.
Parameters
----------
from : dtype, dtype specifier, scalar, or array
Data type, scalar, or array to cast from.
totype : dtype or dtype specifier
Data type to cast to.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Returns
-------
out : bool
True if cast can occur according to the casting rule.
See also
--------
dtype, result_type
Examples
--------
Basic examples
>>> np.can_cast(np.int32, np.int64)
True
>>> np.can_cast(np.float64, np.complex)
True
>>> np.can_cast(np.complex, np.float)
False
>>> np.can_cast('i8', 'f8')
True
>>> np.can_cast('i8', 'f4')
False
>>> np.can_cast('i4', 'S4')
True
Casting scalars
>>> np.can_cast(100, 'i1')
True
>>> np.can_cast(150, 'i1')
False
>>> np.can_cast(150, 'u1')
True
>>> np.can_cast(3.5e100, np.float32)
False
>>> np.can_cast(1000.0, np.float32)
True
Array scalar checks the value, array does not
>>> np.can_cast(np.array(1000.0), np.float32)
True
>>> np.can_cast(np.array([1000.0]), np.float32)
False
Using the casting rules
>>> np.can_cast('i8', 'i8', 'no')
True
>>> np.can_cast('<i8', '>i8', 'no')
False
>>> np.can_cast('<i8', '>i8', 'equiv')
True
>>> np.can_cast('<i4', '>i8', 'equiv')
False
>>> np.can_cast('<i4', '>i8', 'safe')
True
>>> np.can_cast('<i8', '>i4', 'safe')
False
>>> np.can_cast('<i8', '>i4', 'same_kind')
True
>>> np.can_cast('<i8', '>u4', 'same_kind')
False
>>> np.can_cast('<i8', '>u4', 'unsafe')
True
""")
add_newdoc('numpy.core.multiarray', 'promote_types',
"""
promote_types(type1, type2)
Returns the data type with the smallest size and smallest scalar
kind to which both ``type1`` and ``type2`` may be safely cast.
The returned data type is always in native byte order.
This function is symmetric and associative.
Parameters
----------
type1 : dtype or dtype specifier
First data type.
type2 : dtype or dtype specifier
Second data type.
Returns
-------
out : dtype
The promoted data type.
Notes
-----
.. versionadded:: 1.6.0
See Also
--------
result_type, dtype, can_cast
Examples
--------
>>> np.promote_types('f4', 'f8')
dtype('float64')
>>> np.promote_types('i8', 'f4')
dtype('float64')
>>> np.promote_types('>i8', '<c8')
dtype('complex128')
>>> np.promote_types('i1', 'S8')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: invalid type promotion
""")
add_newdoc('numpy.core.multiarray', 'min_scalar_type',
"""
min_scalar_type(a)
For scalar ``a``, returns the data type with the smallest size
and smallest scalar kind which can hold its value. For non-scalar
array ``a``, returns the vector's dtype unmodified.
Floating point values are not demoted to integers,
and complex values are not demoted to floats.
Parameters
----------
a : scalar or array_like
The value whose minimal data type is to be found.
Returns
-------
out : dtype
The minimal data type.
Notes
-----
.. versionadded:: 1.6.0
See Also
--------
result_type, promote_types, dtype, can_cast
Examples
--------
>>> np.min_scalar_type(10)
dtype('uint8')
>>> np.min_scalar_type(-260)
dtype('int16')
>>> np.min_scalar_type(3.1)
dtype('float16')
>>> np.min_scalar_type(1e50)
dtype('float64')
>>> np.min_scalar_type(np.arange(4,dtype='f8'))
dtype('float64')
""")
add_newdoc('numpy.core.multiarray', 'result_type',
"""
result_type(*arrays_and_dtypes)
Returns the type that results from applying the NumPy
type promotion rules to the arguments.
Type promotion in NumPy works similarly to the rules in languages
like C++, with some slight differences. When both scalars and
arrays are used, the array's type takes precedence and the actual value
of the scalar is taken into account.
For example, calculating 3*a, where a is an array of 32-bit floats,
intuitively should result in a 32-bit float output. If the 3 is a
32-bit integer, the NumPy rules indicate it can't convert losslessly
into a 32-bit float, so a 64-bit float should be the result type.
By examining the value of the constant, '3', we see that it fits in
an 8-bit integer, which can be cast losslessly into the 32-bit float.
Parameters
----------
arrays_and_dtypes : list of arrays and dtypes
The operands of some operation whose result type is needed.
Returns
-------
out : dtype
The result type.
See also
--------
dtype, promote_types, min_scalar_type, can_cast
Notes
-----
.. versionadded:: 1.6.0
The specific algorithm used is as follows.
Categories are determined by first checking which of boolean,
integer (int/uint), or floating point (float/complex) the maximum
kind of all the arrays and the scalars are.
If there are only scalars or the maximum category of the scalars
is higher than the maximum category of the arrays,
the data types are combined with :func:`promote_types`
to produce the return value.
Otherwise, `min_scalar_type` is called on each array, and
the resulting data types are all combined with :func:`promote_types`
to produce the return value.
The set of int values is not a subset of the uint values for types
with the same number of bits, something not reflected in
:func:`min_scalar_type`, but handled as a special case in `result_type`.
Examples
--------
>>> np.result_type(3, np.arange(7, dtype='i1'))
dtype('int8')
>>> np.result_type('i4', 'c8')
dtype('complex128')
>>> np.result_type(3.0, -2)
dtype('float64')
""")
add_newdoc('numpy.core.multiarray', 'newbuffer',
"""
newbuffer(size)
Return a new uninitialized buffer object.
Parameters
----------
size : int
Size in bytes of returned buffer object.
Returns
-------
newbuffer : buffer object
Returned, uninitialized buffer object of `size` bytes.
""")
add_newdoc('numpy.core.multiarray', 'getbuffer',
"""
getbuffer(obj [,offset[, size]])
Create a buffer object from the given object referencing a slice of
length size starting at offset.
Default is the entire buffer. A read-write buffer is attempted followed
by a read-only buffer.
Parameters
----------
obj : object
offset : int, optional
size : int, optional
Returns
-------
buffer_obj : buffer
Examples
--------
>>> buf = np.getbuffer(np.ones(5), 1, 3)
>>> len(buf)
3
>>> buf[0]
'\\x00'
>>> buf
<read-write buffer for 0x8af1e70, size 3, offset 1 at 0x8ba4ec0>
""")
add_newdoc('numpy.core', 'dot',
"""
dot(a, b, out=None)
Dot product of two arrays.
For 2-D arrays it is equivalent to matrix multiplication, and for 1-D
arrays to inner product of vectors (without complex conjugation). For
N dimensions it is a sum product over the last axis of `a` and
the second-to-last of `b`::
dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
Parameters
----------
a : array_like
First argument.
b : array_like
Second argument.
out : ndarray, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a,b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
Returns
-------
output : ndarray
Returns the dot product of `a` and `b`. If `a` and `b` are both
scalars or both 1-D arrays then a scalar is returned; otherwise
an array is returned.
If `out` is given, then it is returned.
Raises
------
ValueError
If the last dimension of `a` is not the same size as
the second-to-last dimension of `b`.
See Also
--------
vdot : Complex-conjugating dot product.
tensordot : Sum products over arbitrary axes.
einsum : Einstein summation convention.
Examples
--------
>>> np.dot(3, 4)
12
Neither argument is complex-conjugated:
>>> np.dot([2j, 3j], [2j, 3j])
(-13+0j)
For 2-D arrays it's the matrix product:
>>> a = [[1, 0], [0, 1]]
>>> b = [[4, 1], [2, 2]]
>>> np.dot(a, b)
array([[4, 1],
[2, 2]])
>>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
>>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
>>> np.dot(a, b)[2,3,2,1,2,2]
499128
>>> sum(a[2,3,2,:] * b[1,2,:,2])
499128
""")
add_newdoc('numpy.core', 'einsum',
"""
einsum(subscripts, *operands, out=None, dtype=None, order='K', casting='safe')
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional
array operations can be represented in a simple fashion. This function
provides a way to compute such summations. The best way to understand this
function is to try the examples below, which show how many common NumPy
functions can be implemented as calls to `einsum`.
Parameters
----------
subscripts : str
Specifies the subscripts for summation.
operands : list of array_like
These are the arrays for the operation.
out : ndarray, optional
If provided, the calculation is done into this array.
dtype : data-type, optional
If provided, forces the calculation to use the data type specified.
Note that you may have to also give a more liberal `casting`
parameter to allow the conversions.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
'K' means it should be as close to the layout of the inputs as
is possible, including arbitrarily permuted axes.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Setting this to
'unsafe' is not recommended, as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
See Also
--------
dot, inner, outer, tensordot
Notes
-----
.. versionadded:: 1.6.0
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Repeated subscripts labels in one operand take the diagonal. For example,
``np.einsum('ii', a)`` is equivalent to ``np.trace(a)``.
Whenever a label is repeated, it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to ``np.inner(a,b)``. If a label appears only once,
it is not summed, so ``np.einsum('i', a)`` produces a view of ``a``
with no changes.
The order of labels in the output is by default alphabetical. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose.
The output can be controlled by specifying output subscript labels
as well. This specifies the label order, and allows summing to
be disallowed or forced when desired. The call ``np.einsum('i->', a)``
is like ``np.sum(a, axis=-1)``, and ``np.einsum('ii->i', a)``
is like ``np.diag(a)``. The difference is that `einsum` does not
allow broadcasting by default.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, you can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view.
An alternative way to provide the subscripts and operands is as
``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. The examples
below have corresponding `einsum` calls with the two parameter methods.
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
>>> np.einsum('ii', a)
60
>>> np.einsum(a, [0,0])
60
>>> np.trace(a)
60
>>> np.einsum('ii->i', a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum(a, [0,0], [0])
array([ 0, 6, 12, 18, 24])
>>> np.diag(a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum('ij,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum(a, [0,1], b, [1])
array([ 30, 80, 130, 180, 230])
>>> np.dot(a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum(c, [1,0])
array([[0, 3],
[1, 4],
[2, 5]])
>>> c.T
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum('..., ...', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.multiply(3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum('i,i', b, b)
30
>>> np.einsum(b, [0], b, [0])
30
>>> np.inner(b,b)
30
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum(np.arange(2)+1, [0], b, [1])
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.outer(np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum('i...->...', a)
array([50, 55, 60, 65, 70])
>>> np.einsum(a, [0,Ellipsis], [Ellipsis])
array([50, 55, 60, 65, 70])
>>> np.sum(a, axis=0)
array([50, 55, 60, 65, 70])
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
""")
add_newdoc('numpy.core', 'alterdot',
"""
Change `dot`, `vdot`, and `inner` to use accelerated BLAS functions.
Typically, as a user of Numpy, you do not explicitly call this function. If
Numpy is built with an accelerated BLAS, this function is automatically
called when Numpy is imported.
When Numpy is built with an accelerated BLAS like ATLAS, these functions
are replaced to make use of the faster implementations. The faster
implementations only affect float32, float64, complex64, and complex128
arrays. Furthermore, the BLAS API only includes matrix-matrix,
matrix-vector, and vector-vector products. Products of arrays with larger
dimensionalities use the built-in functions and are not accelerated.
See Also
--------
restoredot : `restoredot` undoes the effects of `alterdot`.
""")
add_newdoc('numpy.core', 'restoredot',
"""
Restore `dot`, `vdot`, and `innerproduct` to the default non-BLAS
implementations.
Typically, the user will only need to call this when troubleshooting an
installation problem, reproducing the conditions of a build without an
accelerated BLAS, or when being very careful about benchmarking linear
algebra operations.
See Also
--------
alterdot : `restoredot` undoes the effects of `alterdot`.
""")
add_newdoc('numpy.core', 'vdot',
"""
vdot(a, b)
Return the dot product of two vectors.
The vdot(`a`, `b`) function handles complex numbers differently than
dot(`a`, `b`). If the first argument is complex the complex conjugate
of the first argument is used for the calculation of the dot product.
Note that `vdot` handles multidimensional arrays differently than `dot`:
it does *not* perform a matrix product, but flattens input arguments
to 1-D vectors first. Consequently, it should only be used for vectors.
Parameters
----------
a : array_like
If `a` is complex the complex conjugate is taken before calculation
of the dot product.
b : array_like
Second argument to the dot product.
Returns
-------
output : ndarray
Dot product of `a` and `b`. Can be an int, float, or
complex depending on the types of `a` and `b`.
See Also
--------
dot : Return the dot product without using the complex conjugate of the
first argument.
Examples
--------
>>> a = np.array([1+2j,3+4j])
>>> b = np.array([5+6j,7+8j])
>>> np.vdot(a, b)
(70-8j)
>>> np.vdot(b, a)
(70+8j)
Note that higher-dimensional arrays are flattened!
>>> a = np.array([[1, 4], [5, 6]])
>>> b = np.array([[4, 1], [2, 2]])
>>> np.vdot(a, b)
30
>>> np.vdot(b, a)
30
>>> 1*4 + 4*1 + 5*2 + 6*2
30
""")
##############################################################################
#
# Documentation for ndarray attributes and methods
#
##############################################################################
##############################################################################
#
# ndarray object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray',
"""
ndarray(shape, dtype=float, buffer=None, offset=0,
strides=None, order=None)
An array object represents a multidimensional, homogeneous array
of fixed-size items. An associated data-type object describes the
format of each element in the array (its byte-order, how many bytes it
occupies in memory, whether it is an integer, a floating point number,
or something else, etc.)
Arrays should be constructed using `array`, `zeros` or `empty` (refer
to the See Also section below). The parameters given here refer to
a low-level method (`ndarray(...)`) for instantiating an array.
For more information, refer to the `numpy` module and examine the
methods and attributes of an array.
Parameters
----------
(for the __new__ method; see Notes below)
shape : tuple of ints
Shape of created array.
dtype : data-type, optional
Any object that can be interpreted as a numpy data type.
buffer : object exposing buffer interface, optional
Used to fill the array with data.
offset : int, optional
Offset of array data in buffer.
strides : tuple of ints, optional
Strides of data in memory.
order : {'C', 'F'}, optional
Row-major or column-major order.
Attributes
----------
T : ndarray
Transpose of the array.
data : buffer
The array's elements, in memory.
dtype : dtype object
Describes the format of the elements in the array.
flags : dict
Dictionary containing information related to memory use, e.g.,
'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
flat : numpy.flatiter object
Flattened version of the array as an iterator. The iterator
allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for
assignment examples; TODO).
imag : ndarray
Imaginary part of the array.
real : ndarray
Real part of the array.
size : int
Number of elements in the array.
itemsize : int
The memory use of each array element in bytes.
nbytes : int
The total number of bytes required to store the array data,
i.e., ``itemsize * size``.
ndim : int
The array's number of dimensions.
shape : tuple of ints
Shape of the array.
strides : tuple of ints
The step-size required to move from one element to the next in
memory. For example, a contiguous ``(3, 4)`` array of type
``int16`` in C-order has strides ``(8, 2)``. This implies that
to move from element to element in memory requires jumps of 2 bytes.
To move from row-to-row, one needs to jump 8 bytes at a time
(``2 * 4``).
ctypes : ctypes object
Class containing properties of the array needed for interaction
with ctypes.
base : ndarray
If the array is a view into another array, that array is its `base`
(unless that array is also a view). The `base` array is where the
array data is actually stored.
See Also
--------
array : Construct an array.
zeros : Create an array, each element of which is zero.
empty : Create an array, but leave its allocated memory unchanged (i.e.,
it contains "garbage").
dtype : Create a data-type.
Notes
-----
There are two modes of creating an array using ``__new__``:
1. If `buffer` is None, then only `shape`, `dtype`, and `order`
are used.
2. If `buffer` is an object exposing the buffer interface, then
all keywords are interpreted.
No ``__init__`` method is needed because the array is fully initialized
after the ``__new__`` method.
Examples
--------
These examples illustrate the low-level `ndarray` constructor. Refer
to the `See Also` section above for easier ways of constructing an
ndarray.
First mode, `buffer` is None:
>>> np.ndarray(shape=(2,2), dtype=float, order='F')
array([[ -1.13698227e+002, 4.25087011e-303],
[ 2.88528414e-306, 3.27025015e-309]]) #random
Second mode:
>>> np.ndarray((2,), buffer=np.array([1,2,3]),
... offset=np.int_().itemsize,
... dtype=int) # offset = 1*itemsize, i.e. skip first element
array([2, 3])
""")
##############################################################################
#
# ndarray attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__',
"""Array protocol: Python side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__',
"""None."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__',
"""Array priority."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
"""Array protocol: C-struct side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('_as_parameter_',
"""Allow the array to be interpreted as a ctypes object by returning the
data-memory location as an integer
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
"""
Base object if memory is from some other object.
Examples
--------
The base of an array that owns its memory is None:
>>> x = np.array([1,2,3,4])
>>> x.base is None
True
Slicing creates a view, whose memory is shared with x:
>>> y = x[2:]
>>> y.base is x
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
"""
An object to simplify the interaction of the array with the ctypes
module.
This attribute creates an object that makes it easier to use arrays
when calling shared libraries with the ctypes module. The returned
object has, among others, data, shape, and strides attributes (see
Notes below) which themselves return ctypes objects that can be used
as arguments to a shared library.
Parameters
----------
None
Returns
-------
c : Python object
Possessing attributes data, shape, strides, etc.
See Also
--------
numpy.ctypeslib
Notes
-----
Below are the public attributes of this object which were documented
in "Guide to NumPy" (we have omitted undocumented public attributes,
as well as documented private attributes):
* data: A pointer to the memory area of the array as a Python integer.
This memory area may contain data that is not aligned, or not in correct
byte-order. The memory area may not even be writeable. The array
flags and data-type of this array should be respected when passing this
attribute to arbitrary C-code to avoid trouble that can include Python
crashing. User Beware! The value of this attribute is exactly the same
as self.__array_interface__['data'][0].
* shape (c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the C-integer corresponding to dtype('p') on this
platform. This base-type could be c_int, c_long, or c_longlong
depending on the platform. The c_intp type is defined accordingly in
numpy.ctypeslib. The ctypes array contains the shape of the underlying
array.
* strides (c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the same as for the shape attribute. This ctypes array
contains the strides information from the underlying array. This strides
information is important for showing how many bytes must be jumped to
get to the next element in the array.
* data_as(obj): Return the data pointer cast to a particular c-types object.
For example, calling self._as_parameter_ is equivalent to
self.data_as(ctypes.c_void_p). Perhaps you want to use the data as a
pointer to a ctypes array of floating-point data:
self.data_as(ctypes.POINTER(ctypes.c_double)).
* shape_as(obj): Return the shape tuple as an array of some other c-types
type. For example: self.shape_as(ctypes.c_short).
* strides_as(obj): Return the strides tuple as an array of some other
c-types type. For example: self.strides_as(ctypes.c_longlong).
Be careful using the ctypes attribute - especially on temporary
arrays or arrays constructed on the fly. For example, calling
``(a+b).ctypes.data_as(ctypes.c_void_p)`` returns a pointer to memory
that is invalid because the array created as (a+b) is deallocated
before the next Python statement. You can avoid this problem using
either ``c=a+b`` or ``ct=(a+b).ctypes``. In the latter case, ct will
hold a reference to the array until ct is deleted or re-assigned.
If the ctypes module is not available, then the ctypes attribute
of array objects still returns something useful, but ctypes objects
are not returned and errors may be raised instead. In particular,
the object will still have the _as_parameter_ attribute which will
return an integer equal to the data attribute.
Examples
--------
>>> import ctypes
>>> x
array([[0, 1],
[2, 3]])
>>> x.ctypes.data
30439712
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long))
<ctypes.LP_c_long object at 0x01F01300>
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)).contents
c_long(0)
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_longlong)).contents
c_longlong(4294967296L)
>>> x.ctypes.shape
<numpy.core._internal.c_long_Array_2 object at 0x01FFD580>
>>> x.ctypes.shape_as(ctypes.c_long)
<numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
>>> x.ctypes.strides
<numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
>>> x.ctypes.strides_as(ctypes.c_longlong)
<numpy.core._internal.c_longlong_Array_2 object at 0x01F01300>
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('data',
"""Python buffer object pointing to the start of the array's data."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
"""
Data-type of the array's elements.
Parameters
----------
None
Returns
-------
d : numpy dtype object
See Also
--------
numpy.dtype
Examples
--------
>>> x
array([[0, 1],
[2, 3]])
>>> x.dtype
dtype('int32')
>>> type(x.dtype)
<type 'numpy.dtype'>
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('imag',
"""
The imaginary part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.imag
array([ 0. , 0.70710678])
>>> x.imag.dtype
dtype('float64')
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize',
"""
Length of one array element in bytes.
Examples
--------
>>> x = np.array([1,2,3], dtype=np.float64)
>>> x.itemsize
8
>>> x = np.array([1,2,3], dtype=np.complex128)
>>> x.itemsize
16
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
"""
Information about the memory layout of the array.
Attributes
----------
C_CONTIGUOUS (C)
The data is in a single, C-style contiguous segment.
F_CONTIGUOUS (F)
The data is in a single, Fortran-style contiguous segment.
OWNDATA (O)
The array owns the memory it uses or borrows it from another object.
WRITEABLE (W)
The data area can be written to. Setting this to False locks
the data, making it read-only. A view (slice, etc.) inherits WRITEABLE
from its base array at creation time, but a view of a writeable
array may be subsequently locked while the base array remains writeable.
(The opposite is not true, in that a view of a locked array may not
be made writeable. However, currently, locking a base object does not
lock any views that already reference it, so under that circumstance it
is possible to alter the contents of a locked array via a previously
created writeable view onto it.) Attempting to change a non-writeable
array raises a RuntimeError exception.
ALIGNED (A)
The data and all elements are aligned appropriately for the hardware.
UPDATEIFCOPY (U)
This array is a copy of some other array. When this array is
deallocated, the base array will be updated with the contents of
this array.
FNC
F_CONTIGUOUS and not C_CONTIGUOUS.
FORC
F_CONTIGUOUS or C_CONTIGUOUS (one-segment test).
BEHAVED (B)
ALIGNED and WRITEABLE.
CARRAY (CA)
BEHAVED and C_CONTIGUOUS.
FARRAY (FA)
BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS.
Notes
-----
The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``),
or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag
names are only supported in dictionary access.
Only the UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be changed by
the user, via direct assignment to the attribute or dictionary entry,
or by calling `ndarray.setflags`.
The array flags cannot be set arbitrarily:
- UPDATEIFCOPY can only be set ``False``.
- ALIGNED can only be set ``True`` if the data is truly aligned.
- WRITEABLE can only be set ``True`` if the array owns its own memory
or the ultimate owner of the memory exposes a writeable buffer
interface or is a string.
Arrays can be both C-style and Fortran-style contiguous simultaneously.
This is clear for 1-dimensional arrays, but can also be true for higher
dimensional arrays.
Even for contiguous arrays a stride for a given dimension
``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
or the array has no elements.
It does *not* generally hold that ``self.strides[-1] == self.itemsize``
for C-style contiguous arrays or ``self.strides[0] == self.itemsize`` for
Fortran-style contiguous arrays.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
"""
A 1-D iterator over the array.
This is a `numpy.flatiter` instance, which acts similarly to, but is not
a subclass of, Python's built-in iterator object.
See Also
--------
flatten : Return a copy of the array collapsed into one dimension.
flatiter
Examples
--------
>>> x = np.arange(1, 7).reshape(2, 3)
>>> x
array([[1, 2, 3],
[4, 5, 6]])
>>> x.flat[3]
4
>>> x.T
array([[1, 4],
[2, 5],
[3, 6]])
>>> x.T.flat[3]
5
>>> type(x.flat)
<type 'numpy.flatiter'>
An assignment example:
>>> x.flat = 3; x
array([[3, 3, 3],
[3, 3, 3]])
>>> x.flat[[1,4]] = 1; x
array([[3, 1, 3],
[3, 1, 3]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes',
"""
Total bytes consumed by the elements of the array.
Notes
-----
Does not include memory consumed by non-element attributes of the
array object.
Examples
--------
>>> x = np.zeros((3,5,2), dtype=np.complex128)
>>> x.nbytes
480
>>> np.prod(x.shape) * x.itemsize
480
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim',
"""
Number of array dimensions.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> x.ndim
1
>>> y = np.zeros((2, 3, 4))
>>> y.ndim
3
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('real',
"""
The real part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.real
array([ 1. , 0.70710678])
>>> x.real.dtype
dtype('float64')
See Also
--------
numpy.real : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
"""
Tuple of array dimensions.
Notes
-----
May be used to "reshape" the array, as long as this would not
require a change in the total number of elements.
Examples
--------
>>> x = np.array([1, 2, 3, 4])
>>> x.shape
(4,)
>>> y = np.zeros((2, 3, 4))
>>> y.shape
(2, 3, 4)
>>> y.shape = (3, 8)
>>> y
array([[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.]])
>>> y.shape = (3, 6)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: total size of new array must be unchanged
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
"""
Number of elements in the array.
Equivalent to ``np.prod(a.shape)``, i.e., the product of the array's
dimensions.
Examples
--------
>>> x = np.zeros((3, 5, 2), dtype=np.complex128)
>>> x.size
30
>>> np.prod(x.shape)
30
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
"""
Tuple of bytes to step in each dimension when traversing an array.
The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a`
is::
offset = sum(np.array(i) * a.strides)
A more detailed explanation of strides can be found in the
"ndarray.rst" file in the NumPy reference guide.
Notes
-----
Imagine an array of 32-bit integers (each 4 bytes)::
x = np.array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]], dtype=np.int32)
This array is stored in memory as 40 bytes, one after the other
(known as a contiguous block of memory). The strides of an array tell
us how many bytes we have to skip in memory to move to the next position
along a certain axis. For example, we have to skip 4 bytes (1 value) to
move to the next column, but 20 bytes (5 values) to get to the same
position in the next row. As such, the strides for the array `x` will be
``(20, 4)``.
See Also
--------
numpy.lib.stride_tricks.as_strided
Examples
--------
>>> y = np.reshape(np.arange(2*3*4), (2,3,4))
>>> y
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
>>> y.strides
(48, 16, 4)
>>> y[1,1,1]
17
>>> offset=sum(y.strides * np.array((1,1,1)))
>>> offset/y.itemsize
17
>>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0)
>>> x.strides
(32, 4, 224, 1344)
>>> i = np.array([3,5,2,2])
>>> offset = sum(i * x.strides)
>>> x[3,5,2,2]
813
>>> offset / x.itemsize
813
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
"""
Same as self.transpose(), except that self is returned if
self.ndim < 2.
Examples
--------
>>> x = np.array([[1.,2.],[3.,4.]])
>>> x
array([[ 1., 2.],
[ 3., 4.]])
>>> x.T
array([[ 1., 3.],
[ 2., 4.]])
>>> x = np.array([1.,2.,3.,4.])
>>> x
array([ 1., 2., 3., 4.])
>>> x.T
array([ 1., 2., 3., 4.])
"""))
##############################################################################
#
# ndarray methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
""" a.__array__(|dtype) -> reference if type unchanged, copy otherwise.
Returns either a new reference to self if dtype is not given or a new array
of provided data type if dtype is different from the current dtype of the
array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__',
"""a.__array_prepare__(obj) -> Object of same type as ndarray object obj.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__',
"""a.__array_wrap__(obj) -> Object of same type as ndarray object a.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__',
"""a.__copy__([order])
Return a copy of the array.
Parameters
----------
order : {'C', 'F', 'A'}, optional
If order is 'C' (False) then the result is contiguous (default).
If order is 'Fortran' (True) then the result has fortran order.
If order is 'Any' (None) then the result has fortran order
only if the array already is in fortran order.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
"""a.__deepcopy__() -> Deep copy of array.
Used if copy.deepcopy is called on an array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__',
"""a.__reduce__()
For pickling.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__',
"""a.__setstate__(version, shape, dtype, isfortran, rawdata)
For unpickling.
Parameters
----------
version : int
optional pickle version. If omitted defaults to 0.
shape : tuple
dtype : data-type
isfortran : bool
rawdata : string or list
a binary string with the data (or a list if 'a' is an object array)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('all',
"""
a.all(axis=None, out=None)
Returns True if all elements evaluate to True.
Refer to `numpy.all` for full documentation.
See Also
--------
numpy.all : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('any',
"""
a.any(axis=None, out=None)
Returns True if any of the elements of `a` evaluate to True.
Refer to `numpy.any` for full documentation.
See Also
--------
numpy.any : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax',
"""
a.argmax(axis=None, out=None)
Return indices of the maximum values along the given axis.
Refer to `numpy.argmax` for full documentation.
See Also
--------
numpy.argmax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
"""
a.argmin(axis=None, out=None)
Return indices of the minimum values along the given axis of `a`.
Refer to `numpy.argmin` for detailed documentation.
See Also
--------
numpy.argmin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort',
"""
a.argsort(axis=-1, kind='quicksort', order=None)
Returns the indices that would sort this array.
Refer to `numpy.argsort` for full documentation.
See Also
--------
numpy.argsort : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition',
"""
a.argpartition(kth, axis=-1, kind='introselect', order=None)
Returns the indices that would partition this array.
Refer to `numpy.argpartition` for full documentation.
.. versionadded:: 1.8.0
See Also
--------
numpy.argpartition : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
"""
a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True)
Copy of the array, cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result.
'C' means C order, 'F' means Fortran order, 'A'
means 'F' order if all the arrays are Fortran contiguous,
'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'unsafe'
for backwards compatibility.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
subok : bool, optional
If True, then sub-classes will be passed-through (default), otherwise
the returned array will be forced to be a base-class array.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to false, and the `dtype`, `order`, and `subok`
requirements are satisfied, the input array is returned instead
of a copy.
Returns
-------
arr_t : ndarray
Unless `copy` is False and the other conditions for returning the input
array are satisfied (see description for `copy` input parameter), `arr_t`
is a new array of the same shape as the input array, with dtype, order
given by `dtype`, `order`.
Raises
------
ComplexWarning
When casting from complex to float or int. To avoid this,
one should use ``a.real.astype(t)``.
Examples
--------
>>> x = np.array([1, 2, 2.5])
>>> x
array([ 1. , 2. , 2.5])
>>> x.astype(int)
array([1, 2, 2])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
"""
a.byteswap(inplace)
Swap the bytes of the array elements
Toggle between little-endian and big-endian data representation by
returning a byteswapped array, optionally swapped in-place.
Parameters
----------
inplace : bool, optional
If ``True``, swap bytes in-place, default is ``False``.
Returns
-------
out : ndarray
The byteswapped array. If `inplace` is ``True``, this is
a view to self.
Examples
--------
>>> A = np.array([1, 256, 8755], dtype=np.int16)
>>> map(hex, A)
['0x1', '0x100', '0x2233']
>>> A.byteswap(True)
array([ 256, 1, 13090], dtype=int16)
>>> map(hex, A)
['0x100', '0x1', '0x3322']
Arrays of strings are not swapped
>>> A = np.array(['ceg', 'fac'])
>>> A.byteswap()
array(['ceg', 'fac'],
dtype='|S3')
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
"""
a.choose(choices, out=None, mode='raise')
Use an index array to construct a new array from a set of choices.
Refer to `numpy.choose` for full documentation.
See Also
--------
numpy.choose : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('clip',
"""
a.clip(a_min, a_max, out=None)
Return an array whose values are limited to ``[a_min, a_max]``.
Refer to `numpy.clip` for full documentation.
See Also
--------
numpy.clip : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('compress',
"""
a.compress(condition, axis=None, out=None)
Return selected slices of this array along given axis.
Refer to `numpy.compress` for full documentation.
See Also
--------
numpy.compress : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conj',
"""
a.conj()
Complex-conjugate all elements.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate',
"""
a.conjugate()
Return the complex conjugate, element-wise.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('copy',
"""
a.copy(order='C')
Return a copy of the array.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :func:`numpy.copy` are very
similar, but have different default values for their order=
arguments.)
See also
--------
numpy.copy
numpy.copyto
Examples
--------
>>> x = np.array([[1,2,3],[4,5,6]], order='F')
>>> y = x.copy()
>>> x.fill(0)
>>> x
array([[0, 0, 0],
[0, 0, 0]])
>>> y
array([[1, 2, 3],
[4, 5, 6]])
>>> y.flags['C_CONTIGUOUS']
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod',
"""
a.cumprod(axis=None, dtype=None, out=None)
Return the cumulative product of the elements along the given axis.
Refer to `numpy.cumprod` for full documentation.
See Also
--------
numpy.cumprod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum',
"""
a.cumsum(axis=None, dtype=None, out=None)
Return the cumulative sum of the elements along the given axis.
Refer to `numpy.cumsum` for full documentation.
See Also
--------
numpy.cumsum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal',
"""
a.diagonal(offset=0, axis1=0, axis2=1)
Return specified diagonals.
Refer to :func:`numpy.diagonal` for full documentation.
See Also
--------
numpy.diagonal : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dot',
"""
a.dot(b, out=None)
Dot product of two arrays.
Refer to `numpy.dot` for full documentation.
See Also
--------
numpy.dot : equivalent function
Examples
--------
>>> a = np.eye(2)
>>> b = np.ones((2, 2)) * 2
>>> a.dot(b)
array([[ 2., 2.],
[ 2., 2.]])
This array method can be conveniently chained:
>>> a.dot(b).dot(b)
array([[ 8., 8.],
[ 8., 8.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dump',
"""a.dump(file)
Dump a pickle of the array to the specified file.
The array can be read back with pickle.load or numpy.load.
Parameters
----------
file : str
A string naming the dump file.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps',
"""
a.dumps()
Returns the pickle of the array as a string.
pickle.loads or numpy.loads will convert the string back to an array.
Parameters
----------
None
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('fill',
"""
a.fill(value)
Fill the array with a scalar value.
Parameters
----------
value : scalar
All elements of `a` will be assigned this value.
Examples
--------
>>> a = np.array([1, 2])
>>> a.fill(0)
>>> a
array([0, 0])
>>> a = np.empty(2)
>>> a.fill(1)
>>> a
array([ 1., 1.])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten',
"""
a.flatten(order='C')
Return a copy of the array collapsed into one dimension.
Parameters
----------
order : {'C', 'F', 'A'}, optional
Whether to flatten in C (row-major), Fortran (column-major) order,
or preserve the C/Fortran ordering from `a`.
The default is 'C'.
Returns
-------
y : ndarray
A copy of the input array, flattened to one dimension.
See Also
--------
ravel : Return a flattened array.
flat : A 1-D flat iterator over the array.
Examples
--------
>>> a = np.array([[1,2], [3,4]])
>>> a.flatten()
array([1, 2, 3, 4])
>>> a.flatten('F')
array([1, 3, 2, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield',
"""
a.getfield(dtype, offset=0)
Returns a field of the given array as a certain type.
A field is a view of the array data with a given data-type. The values in
the view are determined by the given type and the offset into the current
array in bytes. The offset needs to be such that the view dtype fits in the
array dtype; for example an array of dtype complex128 has 16-byte elements.
If taking a view with a 32-bit integer (4 bytes), the offset needs to be
between 0 and 12 bytes.
Parameters
----------
dtype : str or dtype
The data type of the view. The dtype size of the view can not be larger
than that of the array itself.
offset : int
Number of bytes to skip before beginning the element view.
Examples
--------
>>> x = np.diag([1.+1.j]*2)
>>> x[1, 1] = 2 + 4.j
>>> x
array([[ 1.+1.j, 0.+0.j],
[ 0.+0.j, 2.+4.j]])
>>> x.getfield(np.float64)
array([[ 1., 0.],
[ 0., 2.]])
By choosing an offset of 8 bytes we can select the imaginary part of the
array for our view:
>>> x.getfield(np.float64, offset=8)
array([[ 1., 0.],
[ 0., 4.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('item',
"""
a.item(*args)
Copy an element of an array to a standard Python scalar and return it.
Parameters
----------
\\*args : Arguments (variable number and type)
* none: in this case, the method only works for arrays
with one element (`a.size == 1`), which element is
copied into a standard Python scalar object and returned.
* int_type: this argument is interpreted as a flat index into
the array, specifying which element to copy and return.
* tuple of int_types: functions as does a single int_type argument,
except that the argument is interpreted as an nd-index into the
array.
Returns
-------
z : Standard Python scalar object
A copy of the specified element of the array as a suitable
Python scalar
Notes
-----
When the data type of `a` is longdouble or clongdouble, item() returns
a scalar array object because there is no available Python scalar that
would not lose information. Void arrays return a buffer object for item(),
unless fields are defined, in which case a tuple is returned.
`item` is very similar to a[args], except, instead of an array scalar,
a standard Python scalar is returned. This can be useful for speeding up
access to elements of the array and doing arithmetic on elements of the
array using Python's optimized math.
Examples
--------
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[3, 1, 7],
[2, 8, 3],
[8, 5, 3]])
>>> x.item(3)
2
>>> x.item(7)
5
>>> x.item((0, 1))
1
>>> x.item((2, 2))
3
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset',
"""
a.itemset(*args)
Insert scalar into an array (scalar is cast to array's dtype, if possible)
There must be at least 1 argument, and the last argument is defined
as *item*. Then, ``a.itemset(*args)`` is equivalent to but faster
than ``a[args] = item``. The item should be a scalar value and `args`
must select a single item in the array `a`.
Parameters
----------
\*args : Arguments
If one argument: a scalar, only used in case `a` is of size 1.
If two arguments: the last argument is the value to be set
and must be a scalar, the first argument specifies a single array
element location. It is either an int or a tuple.
Notes
-----
Compared to indexing syntax, `itemset` provides some speed increase
for placing a scalar into a particular location in an `ndarray`,
if you must do this. However, generally this is discouraged:
among other problems, it complicates the appearance of the code.
Also, when using `itemset` (and `item`) inside a loop, be sure
to assign the methods to a local variable to avoid the attribute
look-up at each loop iteration.
Examples
--------
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[3, 1, 7],
[2, 8, 3],
[8, 5, 3]])
>>> x.itemset(4, 0)
>>> x.itemset((2, 2), 9)
>>> x
array([[3, 1, 7],
[2, 0, 3],
[8, 5, 9]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setasflat',
"""
a.setasflat(arr)
Equivalent to a.flat = arr.flat, but is generally more efficient.
This function does not check for overlap, so if ``arr`` and ``a``
are viewing the same data with different strides, the results will
be unpredictable.
Parameters
----------
arr : array_like
The array to copy into a.
Examples
--------
>>> a = np.arange(2*4).reshape(2,4)[:,:-1]; a
array([[0, 1, 2],
[4, 5, 6]])
>>> b = np.arange(3*3, dtype='f4').reshape(3,3).T[::-1,:-1]; b
array([[ 2., 5.],
[ 1., 4.],
[ 0., 3.]], dtype=float32)
>>> a.setasflat(b)
>>> a
array([[2, 5, 1],
[4, 0, 3]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
"""
a.max(axis=None, out=None)
Return the maximum along a given axis.
Refer to `numpy.amax` for full documentation.
See Also
--------
numpy.amax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
"""
a.mean(axis=None, dtype=None, out=None)
Returns the average of the array elements along given axis.
Refer to `numpy.mean` for full documentation.
See Also
--------
numpy.mean : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
"""
a.min(axis=None, out=None)
Return the minimum along a given axis.
Refer to `numpy.amin` for full documentation.
See Also
--------
numpy.amin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'may_share_memory',
"""
Determine if two arrays can share memory
The memory-bounds of a and b are computed. If they overlap then
this function returns True. Otherwise, it returns False.
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Parameters
----------
a, b : ndarray
Returns
-------
out : bool
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
""")
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
"""
arr.newbyteorder(new_order='S')
Return the array with the same data viewed with a different byte order.
Equivalent to::
arr.view(arr.dtype.newbyteorder(new_order))
Changes are also made in all fields and sub-arrays of the array data
type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
above. `new_order` codes can be any of::
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
The default value ('S') results in swapping the current
byte order. The code does a case-insensitive check on the first
letter of `new_order` for the alternatives above. For example,
any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
Returns
-------
new_arr : array
New array object with the dtype reflecting given change to the
byte order.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero',
"""
a.nonzero()
Return the indices of the elements that are non-zero.
Refer to `numpy.nonzero` for full documentation.
See Also
--------
numpy.nonzero : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('prod',
"""
a.prod(axis=None, dtype=None, out=None)
Return the product of the array elements over the given axis
Refer to `numpy.prod` for full documentation.
See Also
--------
numpy.prod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp',
"""
a.ptp(axis=None, out=None)
Peak to peak (maximum - minimum) value along a given axis.
Refer to `numpy.ptp` for full documentation.
See Also
--------
numpy.ptp : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('put',
"""
a.put(indices, values, mode='raise')
Set ``a.flat[n] = values[n]`` for all `n` in indices.
Refer to `numpy.put` for full documentation.
See Also
--------
numpy.put : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'copyto',
"""
copyto(dst, src, casting='same_kind', where=None, preservena=False)
Copies values from one array to another, broadcasting as necessary.
Raises a TypeError if the `casting` rule is violated, and if
`where` is provided, it selects which elements to copy.
.. versionadded:: 1.7.0
Parameters
----------
dst : ndarray
The array into which values are copied.
src : array_like
The array from which values are copied.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when copying.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
where : array_like of bool, optional
A boolean array which is broadcasted to match the dimensions
of `dst`, and selects elements to copy from `src` to `dst`
wherever it contains the value True.
preservena : bool, optional
If set to True, leaves any NA values in `dst` untouched. This
is similar to the "hard mask" feature in numpy.ma.
""")
add_newdoc('numpy.core.multiarray', 'putmask',
"""
putmask(a, mask, values)
Changes elements of an array based on conditional and input values.
Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
If `values` is not the same size as `a` and `mask` then it will repeat.
This gives behavior different from ``a[mask] = values``.
.. note:: The `putmask` functionality is also provided by `copyto`, which
can be significantly faster and in addition is NA-aware
(`preservena` keyword). Replacing `putmask` with
``np.copyto(a, values, where=mask)`` is recommended.
Parameters
----------
a : array_like
Target array.
mask : array_like
Boolean mask array. It has to be the same shape as `a`.
values : array_like
Values to put into `a` where `mask` is True. If `values` is smaller
than `a` it will be repeated.
See Also
--------
place, put, take, copyto
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> np.putmask(x, x>2, x**2)
>>> x
array([[ 0, 1, 2],
[ 9, 16, 25]])
If `values` is smaller than `a` it is repeated:
>>> x = np.arange(5)
>>> np.putmask(x, x>1, [-33, -44])
>>> x
array([ 0, 1, -33, -44, -33])
""")
add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel',
"""
a.ravel([order])
Return a flattened array.
Refer to `numpy.ravel` for full documentation.
See Also
--------
numpy.ravel : equivalent function
ndarray.flat : a flat iterator on the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat',
"""
a.repeat(repeats, axis=None)
Repeat elements of an array.
Refer to `numpy.repeat` for full documentation.
See Also
--------
numpy.repeat : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape',
"""
a.reshape(shape, order='C')
Returns an array containing the same data with a new shape.
Refer to `numpy.reshape` for full documentation.
See Also
--------
numpy.reshape : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('resize',
"""
a.resize(new_shape, refcheck=True)
Change shape and size of array in-place.
Parameters
----------
new_shape : tuple of ints, or `n` ints
Shape of resized array.
refcheck : bool, optional
If False, reference count will not be checked. Default is True.
Returns
-------
None
Raises
------
ValueError
If `a` does not own its own data or references or views to it exist,
and the data memory must be changed.
SystemError
If the `order` keyword argument is specified. This behaviour is a
bug in NumPy.
See Also
--------
resize : Return a new array with the specified shape.
Notes
-----
This reallocates space for the data area if necessary.
Only contiguous arrays (data elements consecutive in memory) can be
resized.
The purpose of the reference count check is to make sure you
do not use this array as a buffer for another Python object and then
reallocate the memory. However, reference counts can increase in
other ways so if you are sure that you have not shared the memory
for this array with another Python object, then you may safely set
`refcheck` to False.
Examples
--------
Shrinking an array: array is flattened (in the order that the data are
stored in memory), resized, and reshaped:
>>> a = np.array([[0, 1], [2, 3]], order='C')
>>> a.resize((2, 1))
>>> a
array([[0],
[1]])
>>> a = np.array([[0, 1], [2, 3]], order='F')
>>> a.resize((2, 1))
>>> a
array([[0],
[2]])
Enlarging an array: as above, but missing entries are filled with zeros:
>>> b = np.array([[0, 1], [2, 3]])
>>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple
>>> b
array([[0, 1, 2],
[3, 0, 0]])
Referencing an array prevents resizing...
>>> c = a
>>> a.resize((1, 1))
Traceback (most recent call last):
...
ValueError: cannot resize an array that has been referenced ...
Unless `refcheck` is False:
>>> a.resize((1, 1), refcheck=False)
>>> a
array([[0]])
>>> c
array([[0]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('round',
"""
a.round(decimals=0, out=None)
Return `a` with each element rounded to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
numpy.around : equivalent function
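Examples
--------
An illustrative sketch (array values and decimals are arbitrary):
>>> a = np.array([0.37, 1.64])
>>> a.round()
array([ 0.,  2.])
>>> a.round(decimals=1)
array([ 0.4,  1.6])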
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted',
"""
a.searchsorted(v, side='left', sorter=None)
Find indices where elements of v should be inserted in a to maintain order.
For full documentation, see `numpy.searchsorted`
See Also
--------
numpy.searchsorted : equivalent function
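Examples
--------
A small sketch using an already-sorted array (values are illustrative):
>>> a = np.array([1, 2, 3, 4, 5])
>>> a.searchsorted(3)
2
>>> a.searchsorted(3, side='right')
3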
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield',
"""
a.setfield(val, dtype, offset=0)
Put a value into a specified place in a field defined by a data-type.
Place `val` into `a`'s field defined by `dtype` and beginning `offset`
bytes into the field.
Parameters
----------
val : object
Value to be placed in field.
dtype : dtype object
Data-type of the field in which to place `val`.
offset : int, optional
The number of bytes into the field at which to place `val`.
Returns
-------
None
See Also
--------
getfield
Examples
--------
>>> x = np.eye(3)
>>> x.getfield(np.float64)
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> x.setfield(3, np.int32)
>>> x.getfield(np.int32)
array([[3, 3, 3],
[3, 3, 3],
[3, 3, 3]])
>>> x
array([[ 1.00000000e+000, 1.48219694e-323, 1.48219694e-323],
[ 1.48219694e-323, 1.00000000e+000, 1.48219694e-323],
[ 1.48219694e-323, 1.48219694e-323, 1.00000000e+000]])
>>> x.setfield(np.eye(3), np.int32)
>>> x
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
"""
a.setflags(write=None, align=None, uic=None)
Set array flags WRITEABLE, ALIGNED, and UPDATEIFCOPY, respectively.
These Boolean-valued flags affect how numpy interprets the memory
area used by `a` (see Notes below). The ALIGNED flag can only
be set to True if the data is actually aligned according to the type.
The UPDATEIFCOPY flag can never be set to True. The flag WRITEABLE
can only be set to True if the array owns its own memory, or the
ultimate owner of the memory exposes a writeable buffer interface,
or is a string. (The exception for string is made so that unpickling
can be done without copying memory.)
Parameters
----------
write : bool, optional
Describes whether or not `a` can be written to.
align : bool, optional
Describes whether or not `a` is aligned properly for its type.
uic : bool, optional
Describes whether or not `a` is a copy of another "base" array.
Notes
-----
Array flags provide information about how the memory area used
for the array is to be interpreted. There are 6 Boolean flags
in use, only three of which can be changed by the user:
UPDATEIFCOPY, WRITEABLE, and ALIGNED.
WRITEABLE (W) the data area can be written to;
ALIGNED (A) the data and strides are aligned appropriately for the hardware
(as determined by the compiler);
UPDATEIFCOPY (U) this array is a copy of some other array (referenced
by .base). When this array is deallocated, the base array will be
updated with the contents of this array.
All flags can be accessed using their first (upper case) letter as well
as the full name.
Examples
--------
>>> y
array([[3, 1, 7],
[2, 0, 0],
[8, 5, 9]])
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : True
ALIGNED : True
UPDATEIFCOPY : False
>>> y.setflags(write=0, align=0)
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : False
ALIGNED : False
UPDATEIFCOPY : False
>>> y.setflags(uic=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: cannot set UPDATEIFCOPY flag to True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
"""
a.sort(axis=-1, kind='quicksort', order=None)
Sort an array, in-place.
Parameters
----------
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm. Default is 'quicksort'.
order : list, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
See Also
--------
numpy.sort : Return a sorted copy of an array.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in sorted array.
partition: Partial sort.
Notes
-----
See ``sort`` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.array([[1,4], [3,1]])
>>> a.sort(axis=1)
>>> a
array([[1, 4],
[1, 3]])
>>> a.sort(axis=0)
>>> a
array([[1, 3],
[1, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
>>> a.sort(order='y')
>>> a
array([('c', 1), ('a', 2)],
dtype=[('x', '|S1'), ('y', '<i4')])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
"""
a.partition(kth, axis=-1, kind='introselect', order=None)
Rearranges the elements in the array in such a way that the value of the
element in the kth position is in the position it would be in a sorted array.
All elements smaller than the kth element are moved before this element and
all equal or greater elements are moved behind it. The ordering of the
elements in the two partitions is undefined.
.. versionadded:: 1.8.0
Parameters
----------
kth : int or sequence of ints
Element index to partition by. The kth element value will be in its
final sorted position and all smaller elements will be moved before it
and all equal or greater elements behind it.
The order of all elements in the partitions is undefined.
If provided with a sequence of kth values, it will partition all
elements indexed by them into their sorted positions at once.
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : list, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
See Also
--------
numpy.partition : Return a partitioned copy of an array.
argpartition : Indirect partition.
sort : Full sort.
Notes
-----
See ``np.partition`` for notes on the different algorithms.
Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> a.partition(3)
>>> a
array([2, 1, 3, 4])
>>> a.partition((1, 3))
>>> a
array([1, 2, 3, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
"""
a.squeeze(axis=None)
Remove single-dimensional entries from the shape of `a`.
Refer to `numpy.squeeze` for full documentation.
See Also
--------
numpy.squeeze : equivalent function
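Examples
--------
A brief sketch showing how length-one axes are dropped (shape chosen for illustration):
>>> x = np.array([[[0], [1], [2]]])
>>> x.shape
(1, 3, 1)
>>> x.squeeze().shape
(3,)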
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
"""
a.std(axis=None, dtype=None, out=None, ddof=0)
Returns the standard deviation of the array elements along given axis.
Refer to `numpy.std` for full documentation.
See Also
--------
numpy.std : equivalent function
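Examples
--------
A minimal sketch (two values chosen so the result is exact):
>>> a = np.array([1., 3.])
>>> a.std()
1.0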
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
"""
a.sum(axis=None, dtype=None, out=None)
Return the sum of the array elements over the given axis.
Refer to `numpy.sum` for full documentation.
See Also
--------
numpy.sum : equivalent function
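Examples
--------
A short sketch over a small 2-D array (values are arbitrary):
>>> a = np.array([[1, 2], [3, 4]])
>>> a.sum()
10
>>> a.sum(axis=0)
array([4, 6])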
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
"""
a.swapaxes(axis1, axis2)
Return a view of the array with `axis1` and `axis2` interchanged.
Refer to `numpy.swapaxes` for full documentation.
See Also
--------
numpy.swapaxes : equivalent function
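Examples
--------
A brief sketch turning a row into a column (values are illustrative):
>>> x = np.array([[1, 2, 3]])
>>> x.swapaxes(0, 1)
array([[1],
       [2],
       [3]])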
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
"""
a.take(indices, axis=None, out=None, mode='raise')
Return an array formed from the elements of `a` at the given indices.
Refer to `numpy.take` for full documentation.
See Also
--------
numpy.take : equivalent function
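Examples
--------
A small sketch picking elements by index (values are arbitrary):
>>> a = np.array([4, 3, 5, 7, 6, 8])
>>> a.take([0, 1, 4])
array([4, 3, 6])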
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
"""
a.tofile(fid, sep="", format="%s")
Write array to a file as text or binary (default).
Data is always written in 'C' order, independent of the order of `a`.
The data produced by this method can be recovered using the function
fromfile().
Parameters
----------
fid : file or str
An open file object, or a string containing a filename.
sep : str
Separator between array items for text output.
If "" (empty), a binary file is written, equivalent to
``file.write(a.tostring())``.
format : str
Format string for text file output.
Each entry in the array is formatted to text by first converting
it to the closest Python type, and then using "format" % item.
Notes
-----
This is a convenience function for quick storage of array data.
Information on endianness and precision is lost, so this method is not a
good choice for files intended to archive data or transport data between
machines with different endianness. Some of these problems can be overcome
by outputting the data as text files, at the expense of speed and file
size.
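Examples
--------
A rough sketch of a binary round-trip; the filename 'data.bin' is purely
illustrative:
>>> a = np.arange(4)
>>> a.tofile('data.bin')                      # hypothetical output path
>>> np.fromfile('data.bin', dtype=a.dtype)
array([0, 1, 2, 3])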
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
"""
a.tolist()
Return the array as a (possibly nested) list.
Return a copy of the array data as a (nested) Python list.
Data items are converted to the nearest compatible Python type.
Parameters
----------
none
Returns
-------
y : list
The possibly nested list of array elements.
Notes
-----
The array may be recreated, ``a = np.array(a.tolist())``.
Examples
--------
>>> a = np.array([1, 2])
>>> a.tolist()
[1, 2]
>>> a = np.array([[1, 2], [3, 4]])
>>> list(a)
[array([1, 2]), array([3, 4])]
>>> a.tolist()
[[1, 2], [3, 4]]
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tostring',
"""
a.tostring(order='C')
Construct a Python string containing the raw data bytes in the array.
Constructs a Python string showing a copy of the raw contents of
data memory. The string can be produced in either 'C' or 'Fortran',
or 'Any' order (the default is 'C'-order). 'Any' order means C-order
unless the F_CONTIGUOUS flag in the array is set, in which case it
means 'Fortran' order.
Parameters
----------
order : {'C', 'F', None}, optional
Order of the data for multidimensional arrays:
C, Fortran, or the same as for the original array.
Returns
-------
s : str
A Python string exhibiting a copy of `a`'s raw data.
Examples
--------
>>> x = np.array([[0, 1], [2, 3]])
>>> x.tostring()
'\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00'
>>> x.tostring('C') == x.tostring()
True
>>> x.tostring('F')
'\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00'
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('trace',
"""
a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
Return the sum along diagonals of the array.
Refer to `numpy.trace` for full documentation.
See Also
--------
numpy.trace : equivalent function
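Examples
--------
A minimal sketch on the identity matrix (size chosen for illustration):
>>> np.eye(3).trace()
3.0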
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
"""
a.transpose(*axes)
Returns a view of the array with axes transposed.
For a 1-D array, this has no effect. (To change between column and
row vectors, first cast the 1-D array into a matrix object.)
For a 2-D array, this is the usual matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted (see Examples). If axes are not provided and
``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then
``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.
Parameters
----------
axes : None, tuple of ints, or `n` ints
* None or no argument: reverses the order of the axes.
* tuple of ints: `i` in the `j`-th place in the tuple means `a`'s
`i`-th axis becomes `a.transpose()`'s `j`-th axis.
* `n` ints: same as an n-tuple of the same ints (this form is
intended simply as a "convenience" alternative to the tuple form)
Returns
-------
out : ndarray
View of `a`, with axes suitably permuted.
See Also
--------
ndarray.T : Array property returning the array transposed.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> a
array([[1, 2],
[3, 4]])
>>> a.transpose()
array([[1, 3],
[2, 4]])
>>> a.transpose((1, 0))
array([[1, 3],
[2, 4]])
>>> a.transpose(1, 0)
array([[1, 3],
[2, 4]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
"""
a.var(axis=None, dtype=None, out=None, ddof=0)
Returns the variance of the array elements, along given axis.
Refer to `numpy.var` for full documentation.
See Also
--------
numpy.var : equivalent function
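Examples
--------
A tiny sketch (values picked so the variance is exactly 1):
>>> a = np.array([1., 3.])
>>> a.var()
1.0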
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
"""
a.view(dtype=None, type=None)
New view of array with the same data.
Parameters
----------
dtype : data-type or ndarray sub-class, optional
Data-type descriptor of the returned view, e.g., float32 or int16. The
default, None, results in the view having the same data-type as `a`.
This argument can also be specified as an ndarray sub-class, which
then specifies the type of the returned object (this is equivalent to
setting the ``type`` parameter).
type : Python type, optional
Type of the returned view, e.g., ndarray or matrix. Again, the
default None results in type preservation.
Notes
-----
``a.view()`` is used two different ways:
``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
of the array's memory with a different data-type. This can cause a
reinterpretation of the bytes of memory.
``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
returns an instance of `ndarray_subclass` that looks at the same array
(same shape, dtype, etc.) This does not cause a reinterpretation of the
memory.
For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
bytes per entry than the previous dtype (for example, converting a
regular array to a structured array), then the behavior of the view
cannot be predicted just from the superficial appearance of ``a`` (shown
by ``print(a)``). It also depends on exactly how ``a`` is stored in
memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
defined as a slice or transpose, etc., the view may give different
results.
Examples
--------
>>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
Viewing array data using a different type and dtype:
>>> y = x.view(dtype=np.int16, type=np.matrix)
>>> y
matrix([[513]], dtype=int16)
>>> print type(y)
<class 'numpy.matrixlib.defmatrix.matrix'>
Creating a view on a structured array so it can be used in calculations
>>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
>>> xv = x.view(dtype=np.int8).reshape(-1,2)
>>> xv
array([[1, 2],
[3, 4]], dtype=int8)
>>> xv.mean(0)
array([ 2., 3.])
Making changes to the view changes the underlying array
>>> xv[0,1] = 20
>>> print x
[(1, 20) (3, 4)]
Using a view to convert an array to a record array:
>>> z = x.view(np.recarray)
>>> z.a
array([1], dtype=int8)
Views share data:
>>> x[0] = (9, 10)
>>> z[0]
(9, 10)
Views that change the dtype size (bytes per entry) should normally be
avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
>>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16)
>>> y = x[:, 0:2]
>>> y
array([[1, 2],
[4, 5]], dtype=int16)
>>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: new type not compatible with array.
>>> z = y.copy()
>>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
array([[(1, 2)],
[(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')])
"""))
##############################################################################
#
# umath functions
#
##############################################################################
add_newdoc('numpy.core.umath', 'frompyfunc',
"""
frompyfunc(func, nin, nout)
Takes an arbitrary Python function and returns a Numpy ufunc.
Can be used, for example, to add broadcasting to a built-in Python
function (see Examples section).
Parameters
----------
func : Python function object
An arbitrary Python function.
nin : int
The number of input arguments.
nout : int
The number of objects returned by `func`.
Returns
-------
out : ufunc
Returns a Numpy universal function (``ufunc``) object.
Notes
-----
The returned ufunc always returns PyObject arrays.
Examples
--------
Use frompyfunc to add broadcasting to the Python function ``oct``:
>>> oct_array = np.frompyfunc(oct, 1, 1)
>>> oct_array(np.array((10, 30, 100)))
array([012, 036, 0144], dtype=object)
>>> np.array((oct(10), oct(30), oct(100))) # for comparison
array(['012', '036', '0144'],
dtype='|S4')
""")
add_newdoc('numpy.core.umath', 'geterrobj',
"""
geterrobj()
Return the current object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in Numpy. `geterrobj` is used internally by the other
functions that get and set error handling behavior (`geterr`, `seterr`,
`geterrcall`, `seterrcall`).
Returns
-------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
seterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterrobj() # first get the defaults
[10000, 0, None]
>>> def err_handler(type, flag):
... print "Floating point error (%s), with flag %s" % (type, flag)
...
>>> old_bufsize = np.setbufsize(20000)
>>> old_err = np.seterr(divide='raise')
>>> old_handler = np.seterrcall(err_handler)
>>> np.geterrobj()
[20000, 2, <function err_handler at 0x91dcaac>]
>>> old_err = np.seterr(all='ignore')
>>> np.base_repr(np.geterrobj()[1], 8)
'0'
>>> old_err = np.seterr(divide='warn', over='log', under='call',
invalid='print')
>>> np.base_repr(np.geterrobj()[1], 8)
'4351'
""")
add_newdoc('numpy.core.umath', 'seterrobj',
"""
seterrobj(errobj)
Set the object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in Numpy. `seterrobj` is used internally by the other
functions that set error handling behavior (`seterr`, `seterrcall`).
Parameters
----------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
geterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> old_errobj = np.geterrobj() # first get the defaults
>>> old_errobj
[10000, 0, None]
>>> def err_handler(type, flag):
... print "Floating point error (%s), with flag %s" % (type, flag)
...
>>> new_errobj = [20000, 12, err_handler]
>>> np.seterrobj(new_errobj)
>>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn')
'14'
>>> np.geterr()
{'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'}
>>> np.geterrcall() is err_handler
True
""")
##############################################################################
#
# lib._compiled_base functions
#
##############################################################################
add_newdoc('numpy.lib._compiled_base', 'digitize',
"""
digitize(x, bins, right=False)
Return the indices of the bins to which each value in input array belongs.
Each index ``i`` returned is such that ``bins[i-1] <= x < bins[i]`` if
`bins` is monotonically increasing, or ``bins[i-1] > x >= bins[i]`` if
`bins` is monotonically decreasing. If values in `x` are beyond the
bounds of `bins`, 0 or ``len(bins)`` is returned as appropriate. If right
is True, then the right bin is closed so that the index ``i`` is such
that ``bins[i-1] < x <= bins[i]`` or ``bins[i-1] >= x > bins[i]`` if `bins`
is monotonically increasing or decreasing, respectively.
Parameters
----------
x : array_like
Input array to be binned. It has to be 1-dimensional.
bins : array_like
Array of bins. It has to be 1-dimensional and monotonic.
right : bool, optional
Indicating whether the intervals include the right or the left bin
edge. Default behavior is (right==False) indicating that the interval
does not include the right edge. The left bin end is open in this
case, i.e., ``bins[i-1] <= x < bins[i]`` is the default behavior for
monotonically increasing bins.
Returns
-------
out : ndarray of ints
Output array of indices, of same shape as `x`.
Raises
------
ValueError
If the input is not 1-dimensional, or if `bins` is not monotonic.
TypeError
If the type of the input is complex.
See Also
--------
bincount, histogram, unique
Notes
-----
If values in `x` are such that they fall outside the bin range,
attempting to index `bins` with the indices that `digitize` returns
will result in an IndexError.
Examples
--------
>>> x = np.array([0.2, 6.4, 3.0, 1.6])
>>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
>>> inds = np.digitize(x, bins)
>>> inds
array([1, 4, 3, 2])
>>> for n in range(x.size):
... print bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]]
...
0.0 <= 0.2 < 1.0
4.0 <= 6.4 < 10.0
2.5 <= 3.0 < 4.0
1.0 <= 1.6 < 2.5
>>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.])
>>> bins = np.array([0,5,10,15,20])
>>> np.digitize(x,bins,right=True)
array([1, 2, 3, 4, 4])
>>> np.digitize(x,bins,right=False)
array([1, 3, 3, 4, 5])
""")
add_newdoc('numpy.lib._compiled_base', 'bincount',
"""
bincount(x, weights=None, minlength=None)
Count number of occurrences of each value in array of non-negative ints.
The number of bins (of size 1) is one larger than the largest value in
`x`. If `minlength` is specified, there will be at least this number
of bins in the output array (though it will be longer if necessary,
depending on the contents of `x`).
Each bin gives the number of occurrences of its index value in `x`.
If `weights` is specified the input array is weighted by it, i.e. if a
value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead
of ``out[n] += 1``.
Parameters
----------
x : array_like, 1 dimension, nonnegative ints
Input array.
weights : array_like, optional
Weights, array of the same shape as `x`.
minlength : int, optional
.. versionadded:: 1.6.0
A minimum number of bins for the output array.
Returns
-------
out : ndarray of ints
The result of binning the input array.
The length of `out` is equal to ``np.amax(x)+1``.
Raises
------
ValueError
If the input is not 1-dimensional, or contains elements with negative
values, or if `minlength` is non-positive.
TypeError
If the type of the input is float or complex.
See Also
--------
histogram, digitize, unique
Examples
--------
>>> np.bincount(np.arange(5))
array([1, 1, 1, 1, 1])
>>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
array([1, 3, 1, 1, 0, 0, 0, 1])
>>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
>>> np.bincount(x).size == np.amax(x)+1
True
The input array needs to be of integer dtype, otherwise a
TypeError is raised:
>>> np.bincount(np.arange(5, dtype=np.float))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: array cannot be safely cast to required type
A possible use of ``bincount`` is to perform sums over
variable-size chunks of an array, using the ``weights`` keyword.
>>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
>>> x = np.array([0, 1, 1, 2, 2, 2])
>>> np.bincount(x, weights=w)
array([ 0.3, 0.7, 1.1])
""")
add_newdoc('numpy.lib._compiled_base', 'ravel_multi_index',
"""
ravel_multi_index(multi_index, dims, mode='raise', order='C')
Converts a tuple of index arrays into an array of flat
indices, applying boundary modes to the multi-index.
Parameters
----------
multi_index : tuple of array_like
A tuple of integer arrays, one array for each dimension.
dims : tuple of ints
The shape of array into which the indices from ``multi_index`` apply.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices are handled. Can specify
either one mode or a tuple of modes, one mode per index.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
In 'clip' mode, a negative index which would normally
wrap will clip to 0 instead.
order : {'C', 'F'}, optional
Determines whether the multi-index should be viewed as indexing in
C (row-major) order or FORTRAN (column-major) order.
Returns
-------
raveled_indices : ndarray
An array of indices into the flattened version of an array
of dimensions ``dims``.
See Also
--------
unravel_index
Notes
-----
.. versionadded:: 1.6.0
Examples
--------
>>> arr = np.array([[3,6,6],[4,5,1]])
>>> np.ravel_multi_index(arr, (7,6))
array([22, 41, 37])
>>> np.ravel_multi_index(arr, (7,6), order='F')
array([31, 41, 13])
>>> np.ravel_multi_index(arr, (4,6), mode='clip')
array([22, 23, 19])
>>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
array([12, 13, 13])
>>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
1621
""")
add_newdoc('numpy.lib._compiled_base', 'unravel_index',
"""
unravel_index(indices, dims, order='C')
Converts a flat index or array of flat indices into a tuple
of coordinate arrays.
Parameters
----------
indices : array_like
An integer array whose elements are indices into the flattened
version of an array of dimensions ``dims``. Before version 1.6.0,
this function accepted just one index value.
dims : tuple of ints
The shape of the array to use for unraveling ``indices``.
order : {'C', 'F'}, optional
.. versionadded:: 1.6.0
Determines whether the indices should be viewed as indexing in
C (row-major) order or FORTRAN (column-major) order.
Returns
-------
unraveled_coords : tuple of ndarray
Each array in the tuple has the same shape as the ``indices``
array.
See Also
--------
ravel_multi_index
Examples
--------
>>> np.unravel_index([22, 41, 37], (7,6))
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index([31, 41, 13], (7,6), order='F')
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index(1621, (6,7,8,9))
(3, 1, 4, 1)
""")
add_newdoc('numpy.lib._compiled_base', 'add_docstring',
"""
add_docstring(obj, docstring)
Add a docstring to a built-in obj if possible.
If the obj already has a docstring, raise a RuntimeError.
If this routine does not know how to add a docstring to the object,
raise a TypeError.
""")
add_newdoc('numpy.lib._compiled_base', 'add_newdoc_ufunc',
"""
add_newdoc_ufunc(ufunc, new_docstring)
Replace the docstring for a ufunc with new_docstring.
This method will only work if the current docstring for
the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.)
Parameters
----------
ufunc : numpy.ufunc
A ufunc whose current doc is NULL.
new_docstring : string
The new docstring for the ufunc.
Notes
-----
This method allocates memory for new_docstring on
the heap. Technically this creates a memory leak, since this
memory will not be reclaimed until the end of the program
even if the ufunc itself is removed. However this will only
be a problem if the user is repeatedly creating ufuncs with
no documentation, adding documentation via add_newdoc_ufunc,
and then throwing away the ufunc.
""")
add_newdoc('numpy.lib._compiled_base', 'packbits',
"""
packbits(myarray, axis=None)
Packs the elements of a binary-valued array into bits in a uint8 array.
The result is padded to full bytes by inserting zero bits at the end.
Parameters
----------
myarray : array_like
An integer type array whose elements should be packed to bits.
axis : int, optional
The dimension over which bit-packing is done.
``None`` implies packing the flattened array.
Returns
-------
packed : ndarray
Array of type uint8 whose elements represent bits corresponding to the
logical (0 or nonzero) value of the input elements. The shape of
`packed` has the same number of dimensions as the input (unless `axis`
is None, in which case the output is 1-D).
See Also
--------
unpackbits: Unpacks elements of a uint8 array into a binary-valued output
array.
Examples
--------
>>> a = np.array([[[1,0,1],
... [0,1,0]],
... [[1,1,0],
... [0,0,1]]])
>>> b = np.packbits(a, axis=-1)
>>> b
array([[[160],[64]],[[192],[32]]], dtype=uint8)
Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
and 32 = 0010 0000.
""")
add_newdoc('numpy.lib._compiled_base', 'unpackbits',
"""
unpackbits(myarray, axis=None)
Unpacks elements of a uint8 array into a binary-valued output array.
Each element of `myarray` represents a bit-field that should be unpacked
into a binary-valued output array. The shape of the output array is either
1-D (if `axis` is None) or the same shape as the input array with unpacking
done along the axis specified.
Parameters
----------
myarray : ndarray, uint8 type
Input array.
axis : int, optional
Unpacks along this axis.
Returns
-------
unpacked : ndarray, uint8 type
The elements are binary-valued (0 or 1).
See Also
--------
packbits : Packs the elements of a binary-valued array into bits in a uint8
array.
Examples
--------
>>> a = np.array([[2], [7], [23]], dtype=np.uint8)
>>> a
array([[ 2],
[ 7],
[23]], dtype=uint8)
>>> b = np.unpackbits(a, axis=1)
>>> b
array([[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
""")
##############################################################################
#
# Documentation for ufunc attributes and methods
#
##############################################################################
##############################################################################
#
# ufunc object
#
##############################################################################
add_newdoc('numpy.core', 'ufunc',
"""
Functions that operate element by element on whole arrays.
To see the documentation for a specific ufunc, use np.info(). For
example, np.info(np.sin). Because ufuncs are written in C
(for speed) and linked into Python with NumPy's ufunc facility,
Python's help() function finds this page whenever help() is called
on a ufunc.
A detailed explanation of ufuncs can be found in the "ufuncs.rst"
file in the NumPy reference guide.
Unary ufuncs:
=============
op(X, out=None)
Apply op to X elementwise
Parameters
----------
X : array_like
Input array.
out : array_like
An array to store the output. Must be the same shape as `X`.
Returns
-------
r : array_like
`r` will have the same shape as `X`; if out is provided, `r`
will be equal to out.
Binary ufuncs:
==============
op(X, Y, out=None)
Apply `op` to `X` and `Y` elementwise. May "broadcast" to make
the shapes of `X` and `Y` congruent.
The broadcasting rules are:
* Dimensions of length 1 may be prepended to either array.
* Arrays may be repeated along dimensions of length 1.
Parameters
----------
X : array_like
First input array.
Y : array_like
Second input array.
out : array_like
An array to store the output. Must be the same shape as the
output would have.
Returns
-------
r : array_like
The return value; if out is provided, `r` will be equal to out.
""")
##############################################################################
#
# ufunc attributes
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('identity',
"""
The identity value.
Data attribute containing the identity element for the ufunc, if it has one.
If it does not, the attribute value is None.
Examples
--------
>>> np.add.identity
0
>>> np.multiply.identity
1
>>> np.power.identity
1
>>> print np.exp.identity
None
"""))
add_newdoc('numpy.core', 'ufunc', ('nargs',
"""
The number of arguments.
Data attribute containing the number of arguments the ufunc takes, including
optional ones.
Notes
-----
Typically this value will be one more than what you might expect because all
ufuncs take the optional "out" argument.
Examples
--------
>>> np.add.nargs
3
>>> np.multiply.nargs
3
>>> np.power.nargs
3
>>> np.exp.nargs
2
"""))
add_newdoc('numpy.core', 'ufunc', ('nin',
"""
The number of inputs.
Data attribute containing the number of arguments the ufunc treats as input.
Examples
--------
>>> np.add.nin
2
>>> np.multiply.nin
2
>>> np.power.nin
2
>>> np.exp.nin
1
"""))
add_newdoc('numpy.core', 'ufunc', ('nout',
"""
The number of outputs.
Data attribute containing the number of arguments the ufunc treats as output.
Notes
-----
Since all ufuncs can take output arguments, this will always be (at least) 1.
Examples
--------
>>> np.add.nout
1
>>> np.multiply.nout
1
>>> np.power.nout
1
>>> np.exp.nout
1
"""))
add_newdoc('numpy.core', 'ufunc', ('ntypes',
"""
The number of types.
The number of numerical NumPy types - of which there are 18 total - on which
the ufunc can operate.
See Also
--------
numpy.ufunc.types
Examples
--------
>>> np.add.ntypes
18
>>> np.multiply.ntypes
18
>>> np.power.ntypes
17
>>> np.exp.ntypes
7
>>> np.remainder.ntypes
14
"""))
add_newdoc('numpy.core', 'ufunc', ('types',
"""
Returns a list with types grouped input->output.
Data attribute listing the data-type "Domain-Range" groupings the ufunc can
deliver. The data-types are given using the character codes.
See Also
--------
numpy.ufunc.ntypes
Examples
--------
>>> np.add.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.multiply.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.power.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G',
'OO->O']
>>> np.exp.types
['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O']
>>> np.remainder.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O']
"""))
##############################################################################
#
# ufunc methods
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('reduce',
"""
reduce(a, axis=0, dtype=None, out=None, keepdims=False)
Reduces `a`'s dimension by one, by applying ufunc along one axis.
Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then
:math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
For a one-dimensional array, reduce produces results equivalent to:
::
r = op.identity  # op = ufunc
for i in range(len(A)):
    r = op(r, A[i])
return r
For example, add.reduce() is equivalent to sum().
Parameters
----------
a : array_like
The array to act on.
axis : None or int or tuple of ints, optional
Axis or axes along which a reduction is performed.
The default (`axis` = 0) is to perform a reduction over the first
dimension of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is `None`, a reduction is performed over all the axes.
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
For operations which are either not commutative or not associative,
doing a reduction over multiple axes is not well-defined. The
ufuncs do not currently raise an exception in this case, but will
likely do so in the future.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data-type of the output array if this is provided, or
the data-type of the input array if no output array is provided.
out : ndarray, optional
A location into which the result is stored. If not provided, a
freshly-allocated array is returned.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
r : ndarray
The reduced array. If `out` was supplied, `r` is a reference to it.
Examples
--------
>>> np.multiply.reduce([2,3,5])
30
A multi-dimensional array example:
>>> X = np.arange(8).reshape((2,2,2))
>>> X
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.add.reduce(X, 0)
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X) # confirm: default axis value is 0
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X, 1)
array([[ 2, 4],
[10, 12]])
>>> np.add.reduce(X, 2)
array([[ 1, 5],
[ 9, 13]])
"""))
add_newdoc('numpy.core', 'ufunc', ('accumulate',
"""
accumulate(array, axis=0, dtype=None, out=None)
Accumulate the result of applying the operator to all elements.
For a one-dimensional array, accumulate produces results equivalent to::
r = np.empty(len(A))
t = op.identity  # op = the ufunc being applied to A's elements
for i in range(len(A)):
    t = op(t, A[i])
    r[i] = t
return r
For example, add.accumulate() is equivalent to np.cumsum().
For a multi-dimensional array, accumulate is applied along only one
axis (axis zero by default; see Examples below) so repeated use is
necessary if one wants to accumulate over multiple axes.
Parameters
----------
array : array_like
The array to act on.
axis : int, optional
The axis along which to apply the accumulation; default is zero.
dtype : data-type code, optional
The data-type used to represent the intermediate results. Defaults
to the data-type of the output array if such is provided, or the
data-type of the input array if no output array is provided.
out : ndarray, optional
A location into which the result is stored. If not provided a
freshly-allocated array is returned.
Returns
-------
r : ndarray
The accumulated values. If `out` was supplied, `r` is a reference to
`out`.
Examples
--------
1-D array examples:
>>> np.add.accumulate([2, 3, 5])
array([ 2, 5, 10])
>>> np.multiply.accumulate([2, 3, 5])
array([ 2, 6, 30])
2-D array examples:
>>> I = np.eye(2)
>>> I
array([[ 1., 0.],
[ 0., 1.]])
Accumulate along axis 0 (rows), down columns:
>>> np.add.accumulate(I, 0)
array([[ 1., 0.],
[ 1., 1.]])
>>> np.add.accumulate(I) # no axis specified = axis zero
array([[ 1., 0.],
[ 1., 1.]])
Accumulate along axis 1 (columns), through rows:
>>> np.add.accumulate(I, 1)
array([[ 1., 1.],
[ 0., 1.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('reduceat',
"""
reduceat(a, indices, axis=0, dtype=None, out=None)
Performs a (local) reduce with specified slices over a single axis.
For i in ``range(len(indices))``, `reduceat` computes
``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th
generalized "row" parallel to `axis` in the final result (i.e., in a
2-D array, for example, if `axis = 0`, it becomes the i-th row, but if
`axis = 1`, it becomes the i-th column). There are two exceptions to this:
* when ``i = len(indices) - 1`` (so for the last index),
``indices[i+1] = a.shape[axis]``.
* if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is
simply ``a[indices[i]]``.
The shape of the output depends on the size of `indices`, and may be
larger than `a` (this happens if ``len(indices) > a.shape[axis]``).
Parameters
----------
a : array_like
The array to act on.
indices : array_like
Paired indices, comma separated (not colon), specifying slices to
reduce.
axis : int, optional
The axis along which to apply the reduceat.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data type of the output array if this is provided, or
the data type of the input array if no output array is provided.
out : ndarray, optional
A location into which the result is stored. If not provided a
freshly-allocated array is returned.
Returns
-------
r : ndarray
The reduced values. If `out` was supplied, `r` is a reference to
`out`.
Notes
-----
A descriptive example:
If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as
``ufunc.reduceat(a, indices)[::2]`` where `indices` is
``range(len(array) - 1)`` with a zero placed
in every other element:
``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``.
Don't be fooled by this attribute's name: `reduceat(a)` is not
necessarily smaller than `a`.
Examples
--------
To take the running sum of four successive values:
>>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
array([ 6, 10, 14, 18])
A 2-D example:
>>> x = np.linspace(0, 15, 16).reshape(4,4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
::
# reduce such that the result has the following five rows:
# [row1 + row2 + row3]
# [row4]
# [row2]
# [row3]
# [row1 + row2 + row3 + row4]
>>> np.add.reduceat(x, [0, 3, 1, 2, 0])
array([[ 12., 15., 18., 21.],
[ 12., 13., 14., 15.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 24., 28., 32., 36.]])
::
# reduce such that result has the following two columns:
# [col1 * col2 * col3, col4]
>>> np.multiply.reduceat(x, [0, 3], 1)
array([[ 0., 3.],
[ 120., 7.],
[ 720., 11.],
[ 2184., 15.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('outer',
"""
outer(A, B)
Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`.
Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of
``op.outer(A, B)`` is an array of dimension M + N such that:
.. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] =
op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}])
For `A` and `B` one-dimensional, this is equivalent to::
r = np.empty((len(A), len(B)))
for i in range(len(A)):
    for j in range(len(B)):
        r[i, j] = op(A[i], B[j])  # op = ufunc in question
Parameters
----------
A : array_like
First array
B : array_like
Second array
Returns
-------
r : ndarray
Output array
See Also
--------
numpy.outer
Examples
--------
>>> np.multiply.outer([1, 2, 3], [4, 5, 6])
array([[ 4, 5, 6],
[ 8, 10, 12],
[12, 15, 18]])
A multi-dimensional example:
>>> A = np.array([[1, 2, 3], [4, 5, 6]])
>>> A.shape
(2, 3)
>>> B = np.array([[1, 2, 3, 4]])
>>> B.shape
(1, 4)
>>> C = np.multiply.outer(A, B)
>>> C.shape; C
(2, 3, 1, 4)
array([[[[ 1, 2, 3, 4]],
[[ 2, 4, 6, 8]],
[[ 3, 6, 9, 12]]],
[[[ 4, 8, 12, 16]],
[[ 5, 10, 15, 20]],
[[ 6, 12, 18, 24]]]])
"""))
add_newdoc('numpy.core', 'ufunc', ('at',
"""
at(a, indices, b=None)
Performs unbuffered in place operation on operand 'a' for elements
specified by 'indices'. For addition ufunc, this method is equivalent to
`a[indices] += b`, except that results are accumulated for elements that
are indexed more than once. For example, `a[[0,0]] += 1` will only
increment the first element once because of buffering, whereas
`add.at(a, [0,0], 1)` will increment the first element twice.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
The array to perform in place operation on.
indices : array_like or tuple
Array like index object or slice object for indexing into first
operand. If first operand has multiple dimensions, indices can be a
tuple of array like index objects or slice objects.
b : array_like
Second operand for ufuncs requiring two operands. Operand must be
broadcastable over first operand after indexing or slicing.
Examples
--------
Set items 0 and 1 to their negative values:
>>> a = np.array([1, 2, 3, 4])
>>> np.negative.at(a, [0, 1])
>>> a
array([-1, -2, 3, 4])
::
Increment items 0 and 1, and increment item 2 twice:
>>> a = np.array([1, 2, 3, 4])
>>> np.add.at(a, [0, 1, 2, 2], 1)
>>> a
array([2, 3, 5, 4])
::
Add items 0 and 1 in first array to second array,
and store results in first array:
>>> a = np.array([1, 2, 3, 4])
>>> b = np.array([1, 2])
>>> np.add.at(a, [0, 1], b)
>>> a
array([2, 4, 3, 4])
"""))
##############################################################################
#
# Documentation for dtype attributes and methods
#
##############################################################################
##############################################################################
#
# dtype object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype',
"""
dtype(obj, align=False, copy=False)
Create a data type object.
A numpy array is homogeneous, and contains elements described by a
dtype object. A dtype object can be constructed from different
combinations of fundamental numeric types.
Parameters
----------
obj
Object to be converted to a data type object.
align : bool, optional
Add padding to the fields to match what a C compiler would output
for a similar C-struct. Can be ``True`` only if `obj` is a dictionary
or a comma-separated string. If a struct dtype is being created,
this also sets a sticky alignment flag ``isalignedstruct``.
copy : bool, optional
Make a new copy of the data-type object. If ``False``, the result
may just be a reference to a built-in data-type object.
See also
--------
result_type
Examples
--------
Using array-scalar type:
>>> np.dtype(np.int16)
dtype('int16')
Record, one field name 'f1', containing int16:
>>> np.dtype([('f1', np.int16)])
dtype([('f1', '<i2')])
Record, one field named 'f1', in itself containing a record with one field:
>>> np.dtype([('f1', [('f1', np.int16)])])
dtype([('f1', [('f1', '<i2')])])
Record, two fields: the first field contains an unsigned int, the
second an int32:
>>> np.dtype([('f1', np.uint), ('f2', np.int32)])
dtype([('f1', '<u4'), ('f2', '<i4')])
Using array-protocol type strings:
>>> np.dtype([('a','f8'),('b','S10')])
dtype([('a', '<f8'), ('b', '|S10')])
Using comma-separated field formats. The shape is (2,3):
>>> np.dtype("i4, (2,3)f8")
dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void``
is a flexible type, here of size 10:
>>> np.dtype([('hello',(np.int,3)),('world',np.void,10)])
dtype([('hello', '<i4', 3), ('world', '|V10')])
Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are
the offsets in bytes:
>>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
dtype(('<i2', [('x', '|i1'), ('y', '|i1')]))
Using dictionaries. Two fields named 'gender' and 'age':
>>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
dtype([('gender', '|S1'), ('age', '|u1')])
Offsets in bytes, here 0 and 25:
>>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
dtype([('surname', '|S25'), ('age', '|u1')])
""")
##############################################################################
#
# dtype attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('alignment',
"""
The required alignment (bytes) of this data-type according to the compiler.
More information is available in the C-API section of the manual.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder',
"""
A character indicating the byte-order of this data-type object.
One of:
=== ==============
'=' native
'<' little-endian
'>' big-endian
'|' not applicable
=== ==============
All built-in data-type objects have byteorder either '=' or '|'.
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.byteorder
'='
>>> # endian is not relevant for 8 bit numbers
>>> np.dtype('i1').byteorder
'|'
>>> # or ASCII strings
>>> np.dtype('S2').byteorder
'|'
>>> # Even if specific code is given, and it is native
>>> # '=' is the byteorder
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> dt = np.dtype(native_code + 'i2')
>>> dt.byteorder
'='
>>> # Swapped code shows up as itself
>>> dt = np.dtype(swapped_code + 'i2')
>>> dt.byteorder == swapped_code
True
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('char',
"""A unique character code for each of the 21 different built-in types."""))
add_newdoc('numpy.core.multiarray', 'dtype', ('descr',
"""
Array-interface compliant full description of the data-type.
The format is that required by the 'descr' key in the
`__array_interface__` attribute.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
"""
Dictionary of named fields defined for this data type, or ``None``.
The dictionary is indexed by keys that are the names of the fields.
Each entry in the dictionary is a tuple fully describing the field::
(dtype, offset[, title])
If present, the optional title can be any object (if it is a string
or unicode then it will also be a key in the fields dictionary,
otherwise it's meta-data). Notice also that the first two elements
of the tuple can be passed directly as arguments to the ``ndarray.getfield``
and ``ndarray.setfield`` methods.
See Also
--------
ndarray.getfield, ndarray.setfield
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> print dt.fields
{'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)}
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('flags',
"""
Bit-flags describing how this data type is to be interpreted.
Bit-masks are in `numpy.core.multiarray` as the constants
`ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`,
`NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation
of these flags is in C-API documentation; they are largely useful
for user-defined data-types.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject',
"""
Boolean indicating whether this dtype contains any reference-counted
objects in any fields or sub-dtypes.
Recall that what is actually in the ndarray memory representing
the Python object is the memory address of that object (a pointer).
Special handling may be required, and this attribute is useful for
distinguishing data types that may contain arbitrary Python objects
and data-types that won't.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin',
"""
Integer indicating how this dtype relates to the built-in dtypes.
Read-only.
= ========================================================================
0 if this is a structured array type, with fields
1 if this is a dtype compiled into numpy (such as ints, floats etc)
2 if the dtype is for a user-defined numpy type
A user-defined type uses the numpy C-API machinery to extend
numpy to handle a new array type. See
:ref:`user.user-defined-data-types` in the Numpy manual.
= ========================================================================
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.isbuiltin
1
>>> dt = np.dtype('f8')
>>> dt.isbuiltin
1
>>> dt = np.dtype([('field1', 'f8')])
>>> dt.isbuiltin
0
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isnative',
"""
Boolean indicating whether the byte order of this dtype is native
to the platform.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct',
"""
Boolean indicating whether the dtype is a struct which maintains
field alignment. This flag is sticky, so when combining multiple
structs together, it is preserved and produces new dtypes which
are also aligned.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize',
"""
The element size of this data-type object.
For 18 of the 21 types this number is fixed by the data-type.
For the flexible data-types, this number can be anything.
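Examples
--------
A couple of illustrative cases (the chosen dtypes are arbitrary):
>>> np.dtype(np.float64).itemsize
8
>>> np.dtype('S5').itemsize
5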
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
"""
A character code (one of 'biufcSUV') identifying the general kind of data.
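Examples
--------
A couple of quick illustrations (dtypes chosen arbitrarily):
>>> np.dtype('f8').kind
'f'
>>> np.dtype(np.int32).kind
'i'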
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('name',
"""
A bit-width name for this data-type.
Un-sized flexible data-type objects do not have this attribute.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('names',
"""
Ordered list of field names, or ``None`` if there are no fields.
The names are ordered according to increasing byte offset. This can be
used, for example, to walk through all of the named fields in offset order.
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.names
('name', 'grades')
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('num',
"""
A unique number for each of the 21 different built-in types.
These are roughly ordered from least-to-most precision.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('shape',
"""
Shape tuple of the sub-array if this data type describes a sub-array,
and ``()`` otherwise.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('str',
"""The array-protocol typestring of this data-type object."""))
add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype',
"""
Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and
None otherwise.
The *shape* is the fixed shape of the sub-array described by this
data type, and *item_dtype* the data type of the array.
If a field whose dtype object has this attribute is retrieved,
then the extra dimensions implied by *shape* are tacked on to
the end of the retrieved array.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('type',
"""The type object used to instantiate a scalar of this data-type."""))
##############################################################################
#
# dtype methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder',
"""
newbyteorder(new_order='S')
Return a new dtype with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order
specifications below. The default value ('S') results in
swapping the current byte order.
`new_order` codes can be any of::
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
The code does a case-insensitive check on the first letter of
`new_order` for these alternatives. For example, any of '>'
or 'B' or 'b' or 'brian' are valid to specify big-endian.
Returns
-------
new_dtype : dtype
New dtype object with the given change to the byte order.
Notes
-----
Changes are also made in all fields and sub-arrays of the data type.
Examples
--------
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> native_dt = np.dtype(native_code+'i2')
>>> swapped_dt = np.dtype(swapped_code+'i2')
>>> native_dt.newbyteorder('S') == swapped_dt
True
>>> native_dt.newbyteorder() == swapped_dt
True
>>> native_dt == swapped_dt.newbyteorder('S')
True
>>> native_dt == swapped_dt.newbyteorder('=')
True
>>> native_dt == swapped_dt.newbyteorder('N')
True
>>> native_dt == native_dt.newbyteorder('|')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('<')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('L')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('>')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('B')
True
"""))
##############################################################################
#
# Datetime-related Methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'busdaycalendar',
"""
busdaycalendar(weekmask='1111100', holidays=None)
A business day calendar object that efficiently stores information
defining valid days for the busday family of functions.
The default valid days are Monday through Friday ("business days").
A busdaycalendar object can be specified with any set of weekly
valid days, plus an optional set of "holiday" dates that will always be invalid.
Once a busdaycalendar object is created, the weekmask and holidays
cannot be modified.
.. versionadded:: 1.7.0
Parameters
----------
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates, no matter which
weekday they fall upon. Holiday dates may be specified in any
order, and NaT (not-a-time) dates are ignored. This list is
saved in a normalized form that is suited for fast calculations
of valid days.
Returns
-------
out : busdaycalendar
A business day calendar object containing the specified
weekmask and holidays values.
See Also
--------
is_busday : Returns a boolean array indicating valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Attributes
----------
Note: once a busdaycalendar object is created, you cannot modify the
weekmask or holidays. The attributes return copies of internal data.
weekmask : (copy) seven-element array of bool
holidays : (copy) sorted array of datetime64[D]
Examples
--------
>>> # Some important days in July
... bdd = np.busdaycalendar(
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
>>> # Default is Monday to Friday weekdays
... bdd.weekmask
array([ True, True, True, True, True, False, False], dtype='bool')
>>> # Any holidays already on the weekend are removed
... bdd.holidays
array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]')
""")
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask',
"""A copy of the seven-element boolean mask indicating valid days."""))
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays',
"""A copy of the holiday array indicating additional invalid days."""))
add_newdoc('numpy.core.multiarray', 'is_busday',
"""
is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None)
Calculates which of the given dates are valid days, and which are not.
.. versionadded:: 1.7.0
Parameters
----------
dates : array_like of datetime64[D]
The array of dates to process.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of bool, optional
If provided, this array is filled with the result.
Returns
-------
out : array of bool
An array with the same shape as ``dates``, containing True for
each valid day, and False for each invalid day.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Examples
--------
>>> # The weekdays are Friday, Saturday, and Monday
... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
array([False, False, True], dtype='bool')
""")
add_newdoc('numpy.core.multiarray', 'busday_offset',
"""
busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None)
First adjusts the date to fall on a valid day according to
the ``roll`` rule, then applies offsets to the given dates
counted in valid days.
.. versionadded:: 1.7.0
Parameters
----------
dates : array_like of datetime64[D]
The array of dates to process.
offsets : array_like of int
The array of offsets, which is broadcast with ``dates``.
roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional
How to treat dates that do not fall on a valid day. The default
is 'raise'.
* 'raise' means to raise an exception for an invalid day.
* 'nat' means to return a NaT (not-a-time) for an invalid day.
* 'forward' and 'following' mean to take the first valid day
later in time.
* 'backward' and 'preceding' mean to take the first valid day
earlier in time.
* 'modifiedfollowing' means to take the first valid day
later in time unless it is across a Month boundary, in which
case to take the first valid day earlier in time.
* 'modifiedpreceding' means to take the first valid day
earlier in time unless it is across a Month boundary, in which
case to take the first valid day later in time.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of datetime64[D], optional
If provided, this array is filled with the result.
Returns
-------
out : array of datetime64[D]
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Classes for representing multi-dimensional data with metadata.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
from xml.dom.minidom import Document
import collections
import copy
import datetime
import operator
import warnings
import zlib
import biggus
import numpy as np
import numpy.ma as ma
import iris.analysis
from iris.analysis.cartography import wrap_lons
import iris.analysis.maths
import iris.analysis.interpolate
import iris.aux_factory
import iris.coord_systems
import iris.coords
import iris._concatenate
import iris._constraints
import iris._merge
import iris.exceptions
import iris.util
from iris._cube_coord_common import CFVariableMixin
from functools import reduce
__all__ = ['Cube', 'CubeList', 'CubeMetadata']
class CubeMetadata(collections.namedtuple('CubeMetadata',
['standard_name',
'long_name',
'var_name',
'units',
'attributes',
'cell_methods'])):
"""
Represents the phenomenon metadata for a single :class:`Cube`.
"""
__slots__ = ()
def name(self, default='unknown'):
"""
Returns a human-readable name.
First it tries self.standard_name, then it tries the 'long_name'
attribute, then the 'var_name' attribute, before falling back to
the value of `default` (which itself defaults to 'unknown').
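For example (an illustrative sketch)::
>>> CubeMetadata(None, 'air pressure', None, 'Pa', {}, ()).name()
'air pressure'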
"""
return self.standard_name or self.long_name or self.var_name or default
# The XML namespace to use for CubeML documents
XML_NAMESPACE_URI = "urn:x-iris:cubeml-0.2"
class _CubeFilter(object):
"""
A constraint, paired with a list of cubes matching that constraint.
"""
def __init__(self, constraint, cubes=None):
self.constraint = constraint
if cubes is None:
cubes = CubeList()
self.cubes = cubes
def __len__(self):
return len(self.cubes)
def add(self, cube):
"""
Adds the appropriate (sub)cube to the list of cubes where it
matches the constraint.
"""
sub_cube = self.constraint.extract(cube)
if sub_cube is not None:
self.cubes.append(sub_cube)
def merged(self, unique=False):
"""
Returns a new :class:`_CubeFilter` by merging the list of
cubes.
Kwargs:
* unique:
If True, raises `iris.exceptions.DuplicateDataError` if
duplicate cubes are detected.
"""
return _CubeFilter(self.constraint, self.cubes.merge(unique))
class _CubeFilterCollection(object):
"""
A list of _CubeFilter instances.
"""
@staticmethod
def from_cubes(cubes, constraints=None):
"""
Creates a new collection from an iterable of cubes, and some
optional constraints.
"""
constraints = iris._constraints.list_of_constraints(constraints)
pairs = [_CubeFilter(constraint) for constraint in constraints]
collection = _CubeFilterCollection(pairs)
for cube in cubes:
collection.add_cube(cube)
return collection
def __init__(self, pairs):
self.pairs = pairs
def add_cube(self, cube):
"""
Adds the given :class:`~iris.cube.Cube` to all of the relevant
constraint pairs.
"""
for pair in self.pairs:
pair.add(cube)
def cubes(self):
"""
Returns all the cubes in this collection concatenated into a
single :class:`CubeList`.
"""
result = CubeList()
for pair in self.pairs:
result.extend(pair.cubes)
return result
def merged(self, unique=False):
"""
Returns a new :class:`_CubeFilterCollection` by merging all the cube
lists of this collection.
Kwargs:
* unique:
If True, raises `iris.exceptions.DuplicateDataError` if
duplicate cubes are detected.
"""
return _CubeFilterCollection([pair.merged(unique) for pair in
self.pairs])
class CubeList(list):
"""
All the functionality of a standard :class:`list` with added "Cube"
context.
"""
def __new__(cls, list_of_cubes=None):
"""Given a :class:`list` of cubes, return a CubeList instance."""
cube_list = list.__new__(cls, list_of_cubes)
# Check that all items in the incoming list are cubes. Note that this
# checking does not guarantee that a CubeList instance *always* has
# just cubes in its list as the append & __getitem__ methods have not
# been overridden.
if not all([isinstance(cube, Cube) for cube in cube_list]):
raise ValueError('All items in list_of_cubes must be Cube '
'instances.')
return cube_list
def __str__(self):
"""Runs short :meth:`Cube.summary` on every cube."""
result = ['%s: %s' % (i, cube.summary(shorten=True)) for i, cube in
enumerate(self)]
if result:
result = '\n'.join(result)
else:
result = '< No cubes >'
return result
def __repr__(self):
"""Runs repr on every cube."""
return '[%s]' % ',\n'.join([repr(cube) for cube in self])
# TODO #370 Which operators need overloads?
def __add__(self, other):
return CubeList(list.__add__(self, other))
def __getitem__(self, keys):
"""x.__getitem__(y) <==> x[y]"""
result = super(CubeList, self).__getitem__(keys)
if isinstance(result, list):
result = CubeList(result)
return result
def __getslice__(self, start, stop):
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
result = super(CubeList, self).__getslice__(start, stop)
result = CubeList(result)
return result
def xml(self, checksum=False, order=True, byteorder=True):
"""Return a string of the XML that this list of cubes represents."""
doc = Document()
cubes_xml_element = doc.createElement("cubes")
cubes_xml_element.setAttribute("xmlns", XML_NAMESPACE_URI)
for cube_obj in self:
cubes_xml_element.appendChild(
cube_obj._xml_element(
doc, checksum=checksum, order=order, byteorder=byteorder))
doc.appendChild(cubes_xml_element)
# return our newly created XML string
return doc.toprettyxml(indent=" ")
def extract(self, constraints, strict=False):
"""
Filter each of the cubes which can be filtered by the given
constraints.
This method iterates over each constraint given, and subsets each of
the cubes in this CubeList where possible. Thus, a CubeList of length
**n** when filtered with **m** constraints can generate a maximum of
**m * n** cubes.
Keywords:
* strict - boolean
If strict is True, then there must be exactly one cube which is
filtered per constraint.
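For example (an illustrative sketch; assumes ``cubes`` is a
:class:`CubeList` of loaded cubes)::
sub_cubes = cubes.extract(iris.Constraint('air_temperature'))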
"""
return self._extract_and_merge(self, constraints, strict,
merge_unique=None)
@staticmethod
def _extract_and_merge(cubes, constraints, strict, merge_unique=False):
# * merge_unique - if None: no merging, if false: non unique merging,
# else unique merging (see merge)
constraints = iris._constraints.list_of_constraints(constraints)
# group the resultant cubes by constraints in a dictionary
constraint_groups = dict([(constraint, CubeList()) for constraint in
constraints])
for cube in cubes:
for constraint, cube_list in six.iteritems(constraint_groups):
sub_cube = constraint.extract(cube)
if sub_cube is not None:
cube_list.append(sub_cube)
if merge_unique is not None:
for constraint, cubelist in six.iteritems(constraint_groups):
constraint_groups[constraint] = cubelist.merge(merge_unique)
result = CubeList()
for constraint in constraints:
constraint_cubes = constraint_groups[constraint]
if strict and len(constraint_cubes) != 1:
msg = 'Got %s cubes for constraint %r, ' \
'expecting 1.' % (len(constraint_cubes), constraint)
raise iris.exceptions.ConstraintMismatchError(msg)
result.extend(constraint_cubes)
if strict and len(constraints) == 1:
result = result[0]
return result
def extract_strict(self, constraints):
"""
Calls :meth:`CubeList.extract` with the strict keyword set to True.
"""
return self.extract(constraints, strict=True)
def extract_overlapping(self, coord_names):
"""
Returns a :class:`CubeList` of cubes extracted over regions
where the coordinates overlap, for the coordinates
in coord_names.
Args:
* coord_names:
A string or list of strings of the names of the coordinates
over which to perform the extraction.
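For example (an illustrative sketch; assumes every cube in the list has
a ``'time'`` coordinate)::
overlapping = cubes.extract_overlapping('time')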
"""
if isinstance(coord_names, six.string_types):
coord_names = [coord_names]
def make_overlap_fn(coord_name):
def overlap_fn(cell):
return all(cell in cube.coord(coord_name).cells()
for cube in self)
return overlap_fn
coord_values = {coord_name: make_overlap_fn(coord_name)
for coord_name in coord_names}
return self.extract(iris.Constraint(coord_values=coord_values))
def merge_cube(self):
"""
Return the merged contents of the :class:`CubeList` as a single
:class:`Cube`.
If it is not possible to merge the `CubeList` into a single
`Cube`, a :class:`~iris.exceptions.MergeError` will be raised
describing the reason for the failure.
For example:
>>> cube_1 = iris.cube.Cube([1, 2])
>>> cube_1.add_aux_coord(iris.coords.AuxCoord(0, long_name='x'))
>>> cube_2 = iris.cube.Cube([3, 4])
>>> cube_2.add_aux_coord(iris.coords.AuxCoord(1, long_name='x'))
>>> cube_2.add_dim_coord(
... iris.coords.DimCoord([0, 1], long_name='z'), 0)
>>> single_cube = iris.cube.CubeList([cube_1, cube_2]).merge_cube()
Traceback (most recent call last):
...
iris.exceptions.MergeError: failed to merge into a single cube.
Coordinates in cube.dim_coords differ: z.
Coordinate-to-dimension mapping differs for cube.dim_coords.
"""
if not self:
raise ValueError("can't merge an empty CubeList")
# Register each of our cubes with a single ProtoCube.
proto_cube = iris._merge.ProtoCube(self[0])
for cube in self[1:]:
proto_cube.register(cube, error_on_mismatch=True)
# Extract the merged cube from the ProtoCube.
merged_cube, = proto_cube.merge()
return merged_cube
def merge(self, unique=True):
"""
Returns the :class:`CubeList` resulting from merging this
:class:`CubeList`.
Kwargs:
* unique:
If True, raises `iris.exceptions.DuplicateDataError` if
duplicate cubes are detected.
This combines cubes with different values of an auxiliary scalar
coordinate, by constructing a new dimension.
.. testsetup::
import iris
c1 = iris.cube.Cube([0,1,2], long_name='some_parameter')
xco = iris.coords.DimCoord([11, 12, 13], long_name='x_vals')
c1.add_dim_coord(xco, 0)
c1.add_aux_coord(iris.coords.AuxCoord([100], long_name='y_vals'))
c2 = c1.copy()
c2.coord('y_vals').points = [200]
For example::
>>> print(c1)
some_parameter / (unknown) (x_vals: 3)
Dimension coordinates:
x_vals x
Scalar coordinates:
y_vals: 100
>>> print(c2)
some_parameter / (unknown) (x_vals: 3)
Dimension coordinates:
x_vals x
Scalar coordinates:
y_vals: 200
>>> cube_list = iris.cube.CubeList([c1, c2])
>>> new_cube = cube_list.merge()[0]
>>> print(new_cube)
some_parameter / (unknown) (y_vals: 2; x_vals: 3)
Dimension coordinates:
y_vals x -
x_vals - x
>>> print(new_cube.coord('y_vals').points)
[100 200]
>>>
Contrast this with :meth:`iris.cube.CubeList.concatenate`, which joins
cubes along an existing dimension.
.. note::
If time coordinates in the list of cubes have differing epochs then
the cubes will not be able to be merged. If this occurs, use
:func:`iris.util.unify_time_units` to normalise the epochs of the
time coordinates so that the cubes can be merged.
"""
# Register each of our cubes with its appropriate ProtoCube.
proto_cubes_by_name = {}
for cube in self:
name = cube.standard_name
proto_cubes = proto_cubes_by_name.setdefault(name, [])
proto_cube = None
for target_proto_cube in proto_cubes:
if target_proto_cube.register(cube):
proto_cube = target_proto_cube
break
if proto_cube is None:
proto_cube = iris._merge.ProtoCube(cube)
proto_cubes.append(proto_cube)
# Emulate Python 2 behaviour.
def _none_sort(item):
return (item is not None, item)
# Extract all the merged cubes from the ProtoCubes.
merged_cubes = CubeList()
for name in sorted(proto_cubes_by_name, key=_none_sort):
for proto_cube in proto_cubes_by_name[name]:
merged_cubes.extend(proto_cube.merge(unique=unique))
return merged_cubes
def concatenate_cube(self):
"""
Return the concatenated contents of the :class:`CubeList` as a single
:class:`Cube`.
If it is not possible to concatenate the `CubeList` into a single
`Cube`, a :class:`~iris.exceptions.ConcatenateError` will be raised
describing the reason for the failure.
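For example (an illustrative sketch; ``cube_a`` and ``cube_b`` are
assumed to be compatible cubes)::
cube = iris.cube.CubeList([cube_a, cube_b]).concatenate_cube()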
"""
if not self:
raise ValueError("can't concatenate an empty CubeList")
names = [cube.metadata.name() for cube in self]
unique_names = list(collections.OrderedDict.fromkeys(names))
if len(unique_names) == 1:
res = iris._concatenate.concatenate(self, error_on_mismatch=True)
n_res_cubes = len(res)
if n_res_cubes == 1:
return res[0]
else:
msgs = []
msgs.append('An unexpected problem prevented concatenation.')
msgs.append('Expected only a single cube, '
'found {}.'.format(n_res_cubes))
raise iris.exceptions.ConcatenateError(msgs)
else:
msgs = []
msgs.append('Cube names differ: {} != {}'.format(names[0],
names[1]))
raise iris.exceptions.ConcatenateError(msgs)
def concatenate(self):
"""
Concatenate the cubes over their common dimensions.
Returns:
A new :class:`iris.cube.CubeList` of concatenated
:class:`iris.cube.Cube` instances.
This combines cubes with a common dimension coordinate, but occupying
different regions of the coordinate value. The cubes are joined across
that dimension.
.. testsetup::
import iris
import numpy as np
xco = iris.coords.DimCoord([11, 12, 13, 14], long_name='x_vals')
yco1 = iris.coords.DimCoord([4, 5], long_name='y_vals')
yco2 = iris.coords.DimCoord([7, 9, 10], long_name='y_vals')
c1 = iris.cube.Cube(np.zeros((2,4)), long_name='some_parameter')
c1.add_dim_coord(xco, 1)
c1.add_dim_coord(yco1, 0)
c2 = iris.cube.Cube(np.zeros((3,4)), long_name='some_parameter')
c2.add_dim_coord(xco, 1)
c2.add_dim_coord(yco2, 0)
For example::
>>> print(c1)
some_parameter / (unknown) (y_vals: 2; x_vals: 4)
Dimension coordinates:
y_vals x -
x_vals - x
>>> print(c1.coord('y_vals').points)
[4 5]
>>> print(c2)
some_parameter / (unknown) (y_vals: 3; x_vals: 4)
Dimension coordinates:
y_vals x -
x_vals - x
>>> print(c2.coord('y_vals').points)
[ 7 9 10]
>>> cube_list = iris.cube.CubeList([c1, c2])
>>> new_cube = cube_list.concatenate()[0]
>>> print(new_cube)
some_parameter / (unknown) (y_vals: 5; x_vals: 4)
Dimension coordinates:
y_vals x -
x_vals - x
>>> print(new_cube.coord('y_vals').points)
[ 4 5 7 9 10]
>>>
Contrast this with :meth:`iris.cube.CubeList.merge`, which makes a new
dimension from values of an auxiliary scalar coordinate.
.. note::
If time coordinates in the list of cubes have differing epochs then
the cubes will not be able to be concatenated. If this occurs, use
:func:`iris.util.unify_time_units` to normalise the epochs of the
time coordinates so that the cubes can be concatenated.
"""
return iris._concatenate.concatenate(self)
def _is_single_item(testee):
"""
Return whether this is a single item, rather than an iterable.
We count string types as 'single', also.
"""
return (isinstance(testee, six.string_types)
or not isinstance(testee, collections.Iterable))
class Cube(CFVariableMixin):
"""
A single Iris cube of data and metadata.
Typically obtained from :func:`iris.load`, :func:`iris.load_cube`,
:func:`iris.load_cubes`, or from the manipulation of existing cubes.
For example:
>>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
>>> print(cube)
air_temperature / (K) (latitude: 73; longitude: 96)
Dimension coordinates:
latitude x -
longitude - x
Scalar coordinates:
forecast_period: 6477 hours, bound=(-28083.0, 6477.0) hours
forecast_reference_time: 1998-03-01 03:00:00
pressure: 1000.0 hPa
time: 1998-12-01 00:00:00, \
bound=(1994-12-01 00:00:00, 1998-12-01 00:00:00)
Attributes:
STASH: m01s16i203
source: Data from Met Office Unified Model
Cell methods:
mean within years: time
mean over years: time
See the :doc:`user guide</userguide/index>` for more information.
"""
#: Indicates to client code that the object supports
#: "orthogonal indexing", which means that slices that are 1d arrays
#: or lists slice along each dimension independently. This behavior
#: is similar to Fortran or Matlab, but different than numpy.
__orthogonal_indexing__ = True
def __init__(self, data, standard_name=None, long_name=None,
var_name=None, units=None, attributes=None,
cell_methods=None, dim_coords_and_dims=None,
aux_coords_and_dims=None, aux_factories=None):
"""
Creates a cube with data and optional metadata.
Not typically used - normally cubes are obtained by loading data
(e.g. :func:`iris.load`) or from manipulating existing cubes.
Args:
* data
This object defines the shape of the cube and the phenomenon
value in each cell.
It can be a biggus array, a numpy array, a numpy array
subclass (such as :class:`numpy.ma.MaskedArray`), or an
*array_like* as described in :func:`numpy.asarray`.
See :attr:`Cube.data<iris.cube.Cube.data>`.
Kwargs:
* standard_name
The standard name for the Cube's data.
* long_name
An unconstrained description of the cube.
* var_name
The CF variable name for the cube.
* units
The unit of the cube, e.g. ``"m s-1"`` or ``"kelvin"``.
* attributes
A dictionary of cube attributes
* cell_methods
A tuple of CellMethod objects, generally set by Iris, e.g.
``(CellMethod("mean", coords='latitude'), )``.
* dim_coords_and_dims
A list of coordinates with scalar dimension mappings, e.g.
``[(lat_coord, 0), (lon_coord, 1)]``.
* aux_coords_and_dims
A list of coordinates with dimension mappings,
e.g. ``[(lat_coord, 0), (lon_coord, (0, 1))]``.
See also :meth:`Cube.add_dim_coord()<iris.cube.Cube.add_dim_coord>`
and :meth:`Cube.add_aux_coord()<iris.cube.Cube.add_aux_coord>`.
* aux_factories
A list of auxiliary coordinate factories. See
:mod:`iris.aux_factory`.
For example::
>>> from iris.coords import DimCoord
>>> from iris.cube import Cube
>>> latitude = DimCoord(np.linspace(-90, 90, 4),
... standard_name='latitude',
... units='degrees')
>>> longitude = DimCoord(np.linspace(45, 360, 8),
... standard_name='longitude',
... units='degrees')
>>> cube = Cube(np.zeros((4, 8), np.float32),
... dim_coords_and_dims=[(latitude, 0),
... (longitude, 1)])
"""
# Temporary error while we transition the API.
if isinstance(data, six.string_types):
raise TypeError('Invalid data type: {!r}.'.format(data))
if not isinstance(data, (biggus.Array, ma.MaskedArray)):
data = np.asarray(data)
self._my_data = data
#: The "standard name" for the Cube's phenomenon.
self.standard_name = standard_name
#: An instance of :class:`cf_units.Unit` describing the Cube's data.
self.units = units
#: The "long name" for the Cube's phenomenon.
self.long_name = long_name
#: The CF variable name for the Cube.
self.var_name = var_name
self.cell_methods = cell_methods
#: A dictionary, with a few restricted keys, for arbitrary
#: Cube metadata.
self.attributes = attributes
# Coords
self._dim_coords_and_dims = []
self._aux_coords_and_dims = []
self._aux_factories = []
identities = set()
if dim_coords_and_dims:
dims = set()
for coord, dim in dim_coords_and_dims:
identity = coord.standard_name, coord.long_name
if identity not in identities and dim not in dims:
self._add_unique_dim_coord(coord, dim)
else:
self.add_dim_coord(coord, dim)
identities.add(identity)
dims.add(dim)
if aux_coords_and_dims:
for coord, dims in aux_coords_and_dims:
identity = coord.standard_name, coord.long_name
if identity not in identities:
self._add_unique_aux_coord(coord, dims)
else:
self.add_aux_coord(coord, dims)
identities.add(identity)
if aux_factories:
for factory in aux_factories:
self.add_aux_factory(factory)
@property
def metadata(self):
"""
An instance of :class:`CubeMetadata` describing the phenomenon.
This property can be updated with any of:
- another :class:`CubeMetadata` instance,
- a tuple/dict which can be used to make a :class:`CubeMetadata`,
- or any object providing the attributes exposed by
:class:`CubeMetadata`.
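For example (an illustrative sketch; ``other_cube`` is assumed)::
cube.metadata = other_cube.metadata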
"""
return CubeMetadata(self.standard_name, self.long_name, self.var_name,
self.units, self.attributes, self.cell_methods)
@metadata.setter
def metadata(self, value):
try:
value = CubeMetadata(**value)
except TypeError:
try:
value = CubeMetadata(*value)
except TypeError:
missing_attrs = [field for field in CubeMetadata._fields
if not hasattr(value, field)]
if missing_attrs:
raise TypeError('Invalid/incomplete metadata')
for name in CubeMetadata._fields:
setattr(self, name, getattr(value, name))
def is_compatible(self, other, ignore=None):
"""
Return whether the cube is compatible with another.
Compatibility is determined by comparing :meth:`iris.cube.Cube.name()`,
:attr:`iris.cube.Cube.units`, :attr:`iris.cube.Cube.cell_methods` and
:attr:`iris.cube.Cube.attributes` that are present in both objects.
Args:
* other:
An instance of :class:`iris.cube.Cube` or
:class:`iris.cube.CubeMetadata`.
* ignore:
A single attribute key or iterable of attribute keys to ignore when
comparing the cubes. Default is None. To ignore all attributes set
this to other.attributes.
Returns:
Boolean.
.. seealso::
:meth:`iris.util.describe_diff()`
.. note::
This function does not indicate whether the two cubes can be
merged, instead it checks only the four items quoted above for
equality. Determining whether two cubes will merge requires
additional logic that is beyond the scope of this method.
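For example (an illustrative sketch; ``other_cube`` and the 'history'
attribute are assumed)::
compatible = cube.is_compatible(other_cube, ignore='history')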
"""
compatible = (self.name() == other.name() and
self.units == other.units and
self.cell_methods == other.cell_methods)
if compatible:
common_keys = set(self.attributes).intersection(other.attributes)
if ignore is not None:
if isinstance(ignore, six.string_types):
ignore = (ignore,)
common_keys = common_keys.difference(ignore)
for key in common_keys:
if np.any(self.attributes[key] != other.attributes[key]):
compatible = False
break
return compatible
def convert_units(self, unit):
"""
Change the cube's units, converting the values in the data array.
For example, if a cube's :attr:`~iris.cube.Cube.units` are
kelvin then::
cube.convert_units('celsius')
will change the cube's :attr:`~iris.cube.Cube.units` attribute to
celsius and subtract 273.15 from each value in
:attr:`~iris.cube.Cube.data`.
.. warning::
Calling this method will trigger any deferred loading, causing
the cube's data array to be loaded into memory.
"""
# If the cube has units convert the data.
if not self.units.is_unknown():
self.data = self.units.convert(self.data, unit)
self.units = unit
def add_cell_method(self, cell_method):
"""Add a CellMethod to the Cube."""
self.cell_methods += (cell_method, )
def add_aux_coord(self, coord, data_dims=None):
"""
Adds a CF auxiliary coordinate to the cube.
Args:
* coord
The :class:`iris.coords.DimCoord` or :class:`iris.coords.AuxCoord`
instance to add to the cube.
Kwargs:
* data_dims
Integer or iterable of integers giving the data dimensions spanned
by the coordinate.
Raises a ValueError if a coordinate with identical metadata already
exists on the cube.
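For example (an illustrative sketch; assumes dimension 0 of ``cube``
has length 3)::
level = iris.coords.AuxCoord([1, 2, 3], long_name='model_level')
cube.add_aux_coord(level, data_dims=0)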
See also :meth:`Cube.remove_coord()<iris.cube.Cube.remove_coord>`.
"""
if self.coords(coord): # TODO: just fail on duplicate object
raise ValueError('Duplicate coordinates are not permitted.')
self._add_unique_aux_coord(coord, data_dims)
def _add_unique_aux_coord(self, coord, data_dims):
# Convert to a tuple of integers
if data_dims is None:
data_dims = tuple()
elif isinstance(data_dims, collections.Container):
data_dims = tuple(int(d) for d in data_dims)
else:
data_dims = (int(data_dims),)
if data_dims:
if len(data_dims) != coord.ndim:
msg = 'Invalid data dimensions: {} given, {} expected for ' \
'{!r}.'.format(len(data_dims), coord.ndim, coord.name())
raise ValueError(msg)
# Check compatibility with the shape of the data
for i, dim in enumerate(data_dims):
if coord.shape[i] != self.shape[dim]:
msg = 'Unequal lengths. Cube dimension {} => {};' \
' coord {!r} dimension {} => {}.'
raise ValueError(msg.format(dim, self.shape[dim],
coord.name(), i,
coord.shape[i]))
elif coord.shape != (1,):
raise ValueError('Missing data dimensions for multi-valued'
' coordinate {!r}'.format(coord.name()))
self._aux_coords_and_dims.append([coord, data_dims])
def add_aux_factory(self, aux_factory):
"""
Adds an auxiliary coordinate factory to the cube.
Args:
* aux_factory
The :class:`iris.aux_factory.AuxCoordFactory` instance to add.
"""
if not isinstance(aux_factory, iris.aux_factory.AuxCoordFactory):
raise TypeError('Factory must be a subclass of '
'iris.aux_factory.AuxCoordFactory.')
self._aux_factories.append(aux_factory)
def add_dim_coord(self, dim_coord, data_dim):
"""
Add a CF coordinate to the cube.
Args:
* dim_coord
The :class:`iris.coords.DimCoord` instance to add to the cube.
* data_dim
Integer giving the data dimension spanned by the coordinate.
Raises a ValueError if a coordinate with identical metadata already
exists on the cube or if a coord already exists for the
given dimension.
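For example (an illustrative sketch; assumes dimension 0 of ``cube``
has length 3)::
x_coord = iris.coords.DimCoord([0.0, 1.0, 2.0], long_name='x')
cube.add_dim_coord(x_coord, 0)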
See also :meth:`Cube.remove_coord()<iris.cube.Cube.remove_coord>`.
"""
if self.coords(dim_coord):
raise ValueError('The coordinate already exists on the cube. '
'Duplicate coordinates are not permitted.')
# Check dimension is available
if self.coords(dimensions=data_dim, dim_coords=True):
raise ValueError('A dim_coord is already associated with '
'dimension %d.' % data_dim)
self._add_unique_dim_coord(dim_coord, data_dim)
def _add_unique_dim_coord(self, dim_coord, data_dim):
if isinstance(dim_coord, iris.coords.AuxCoord):
raise ValueError('The dim_coord may not be an AuxCoord instance.')
# Convert data_dim to a single integer
if isinstance(data_dim, collections.Container):
if len(data_dim) != 1:
raise ValueError('The supplied data dimension must be a'
' single number.')
data_dim = int(list(data_dim)[0])
else:
data_dim = int(data_dim)
# Check data_dim value is valid
if data_dim < 0 or data_dim >= self.ndim:
raise ValueError('The cube does not have the specified dimension '
'(%d)' % data_dim)
# Check compatibility with the shape of the data
if dim_coord.shape[0] != self.shape[data_dim]:
msg = 'Unequal lengths. Cube dimension {} => {}; coord {!r} => {}.'
raise ValueError(msg.format(data_dim, self.shape[data_dim],
dim_coord.name(),
len(dim_coord.points)))
self._dim_coords_and_dims.append([dim_coord, int(data_dim)])
def remove_aux_factory(self, aux_factory):
"""Removes the given auxiliary coordinate factory from the cube."""
self._aux_factories.remove(aux_factory)
def _remove_coord(self, coord):
self._dim_coords_and_dims = [(coord_, dim) for coord_, dim in
self._dim_coords_and_dims if coord_
is not coord]
self._aux_coords_and_dims = [(coord_, dims) for coord_, dims in
self._aux_coords_and_dims if coord_
is not coord]
def remove_coord(self, coord):
"""
Removes a coordinate from the cube.
Args:
* coord (string or coord)
The (name of the) coordinate to remove from the cube.
See also :meth:`Cube.add_dim_coord()<iris.cube.Cube.add_dim_coord>`
and :meth:`Cube.add_aux_coord()<iris.cube.Cube.add_aux_coord>`.
"""
coord = self.coord(coord)
self._remove_coord(coord)
for factory in self.aux_factories:
factory.update(coord)
def replace_coord(self, new_coord):
"""
Replace the coordinate whose metadata matches the given coordinate.
"""
old_coord = self.coord(new_coord)
dims = self.coord_dims(old_coord)
was_dimensioned = old_coord in self.dim_coords
self._remove_coord(old_coord)
if was_dimensioned and isinstance(new_coord, iris.coords.DimCoord):
self.add_dim_coord(new_coord, dims[0])
else:
self.add_aux_coord(new_coord, dims)
for factory in self.aux_factories:
factory.update(old_coord, new_coord)
def coord_dims(self, coord):
"""
Returns a tuple of the data dimensions relevant to the given
coordinate.
When searching for the given coordinate in the cube the comparison is
made using coordinate metadata equality. Hence the given coordinate
instance need not exist on the cube, and may contain different
coordinate values.
Args:
* coord (string or coord)
The (name of the) coord to look for.
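For example (an illustrative sketch; assumes ``cube`` has a latitude
coordinate spanning data dimension 0)::
>>> cube.coord_dims('latitude')
(0,)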
"""
coord = self.coord(coord)
# Search for existing coordinate (object) on the cube, faster lookup
# than equality - makes no functional difference.
matches = [(dim,) for coord_, dim in self._dim_coords_and_dims if
coord_ is coord]
if not matches:
matches = [dims for coord_, dims in self._aux_coords_and_dims if
coord_ is coord]
# Search derived aux coords
target_defn = coord._as_defn()
if not matches:
match = lambda factory: factory._as_defn() == target_defn
factories = filter(match, self._aux_factories)
matches = [factory.derived_dims(self.coord_dims) for factory in
factories]
if not matches:
raise iris.exceptions.CoordinateNotFoundError(coord.name())
return matches[0]
def aux_factory(self, name=None, standard_name=None, long_name=None,
var_name=None):
"""
Returns the single coordinate factory that matches the criteria,
or raises an error if not found.
Kwargs:
* name
If not None, matches against factory.name().
* standard_name
The CF standard name of the desired coordinate factory.
If None, does not check for standard name.
* long_name
An unconstrained description of the coordinate factory.
If None, does not check for long_name.
* var_name
The CF variable name of the desired coordinate factory.
If None, does not check for var_name.
.. note::
If the arguments given do not result in precisely 1 coordinate
factory being matched, an
:class:`iris.exceptions.CoordinateNotFoundError` is raised.
"""
factories = self.aux_factories
if name is not None:
factories = [factory for factory in factories if
factory.name() == name]
if standard_name is not None:
factories = [factory for factory in factories if
factory.standard_name == standard_name]
if long_name is not None:
factories = [factory for factory in factories if
factory.long_name == long_name]
if var_name is not None:
factories = [factory for factory in factories if
factory.var_name == var_name]
if len(factories) > 1:
factory_names = (factory.name() for factory in factories)
msg = 'Expected to find exactly one coordinate factory, but ' \
'found {}. They were: {}.'.format(len(factories),
', '.join(factory_names))
raise iris.exceptions.CoordinateNotFoundError(msg)
elif len(factories) == 0:
msg = 'Expected to find exactly one coordinate factory, but ' \
'found none.'
raise iris.exceptions.CoordinateNotFoundError(msg)
return factories[0]
def coords(self, name_or_coord=None, standard_name=None,
long_name=None, var_name=None, attributes=None, axis=None,
contains_dimension=None, dimensions=None, coord=None,
coord_system=None, dim_coords=None, name=None):
"""
Return a list of coordinates in this cube fitting the given criteria.
Kwargs:
* name_or_coord
Either
(a) a :attr:`standard_name`, :attr:`long_name`, or
:attr:`var_name`. Defaults to value of `default`
(which itself defaults to `unknown`) as defined in
:class:`iris._cube_coord_common.CFVariableMixin`.
(b) a coordinate instance with metadata equal to that of
the desired coordinates. Accepts either a
:class:`iris.coords.DimCoord`, :class:`iris.coords.AuxCoord`,
:class:`iris.aux_factory.AuxCoordFactory`
or :class:`iris.coords.CoordDefn`.
* name
.. deprecated:: 1.6. Please use the name_or_coord kwarg.
* standard_name
The CF standard name of the desired coordinate. If None, does not
check for standard name.
* long_name
An unconstrained description of the coordinate. If None, does not
check for long_name.
* var_name
The CF variable name of the desired coordinate. If None, does not
check for var_name.
* attributes
A dictionary of attributes desired on the coordinates. If None,
does not check for attributes.
* axis
The desired coordinate axis, see
:func:`iris.util.guess_coord_axis`. If None, does not check for
axis. Accepts the values 'X', 'Y', 'Z' and 'T' (case-insensitive).
* contains_dimension
The desired coordinate contains the data dimension. If None, does
not check for the dimension.
* dimensions
The exact data dimensions of the desired coordinate. Coordinates
with no data dimension can be found with an empty tuple or list
(i.e. ``()`` or ``[]``). If None, does not check for dimensions.
* coord
.. deprecated:: 1.6. Please use the name_or_coord kwarg.
* coord_system
Whether the desired coordinates have coordinate systems equal to
the given coordinate system. If None, no check is done.
* dim_coords
Set to True to only return coordinates that are the cube's
dimension coordinates. Set to False to only return coordinates
that are the cube's auxiliary and derived coordinates. If None,
returns all coordinates.
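For example (an illustrative sketch)::
y_coords = cube.coords(axis='Y')
dim_only_coords = cube.coords(dim_coords=True)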
See also :meth:`Cube.coord()<iris.cube.Cube.coord>`.
"""
# Handle deprecated kwargs
if name is not None:
name_or_coord = name
warnings.warn('the name kwarg is deprecated and will be removed '
'in a future release. Consider converting '
'existing code to use the name_or_coord '
'kwarg as a replacement.',
stacklevel=2)
if coord is not None:
name_or_coord = coord
warnings.warn('the coord kwarg is deprecated and will be removed '
'in a future release. Consider converting '
'existing code to use the name_or_coord '
'kwarg as a replacement.',
stacklevel=2)
# Finish handling deprecated kwargs
name = None
coord = None
if isinstance(name_or_coord, six.string_types):
name = name_or_coord
else:
coord = name_or_coord
coords_and_factories = []
if dim_coords in [True, None]:
coords_and_factories += list(self.dim_coords)
if dim_coords in [False, None]:
coords_and_factories += list(self.aux_coords)
coords_and_factories += list(self.aux_factories)
if name is not None:
coords_and_factories = [coord_ for coord_ in coords_and_factories
if coord_.name() == name]
if standard_name is not None:
coords_and_factories = [coord_ for coord_ in coords_and_factories
if coord_.standard_name == standard_name]
if long_name is not None:
coords_and_factories = [coord_ for coord_ in coords_and_factories
if coord_.long_name == long_name]
if var_name is not None:
coords_and_factories = [coord_ for coord_ in coords_and_factories
if coord_.var_name == var_name]
if axis is not None:
axis = axis.upper()
guess_axis = iris.util.guess_coord_axis
coords_and_factories = [coord_ for coord_ in coords_and_factories
if guess_axis(coord_) == axis]
if attributes is not None:
if not isinstance(attributes, collections.Mapping):
msg = 'The attributes keyword was expecting a dictionary ' \
'type, but got a %s instead.' % type(attributes)
raise ValueError(msg)
attr_filter = lambda coord_: all(k in coord_.attributes and
coord_.attributes[k] == v for
k, v in six.iteritems(attributes))
coords_and_factories = [coord_ for coord_ in coords_and_factories
if attr_filter(coord_)]
if coord_system is not None:
coords_and_factories = [coord_ for coord_ in coords_and_factories
if coord_.coord_system == coord_system]
if coord is not None:
if isinstance(coord, iris.coords.CoordDefn):
defn = coord
else:
defn = coord._as_defn()
coords_and_factories = [coord_ for coord_ in coords_and_factories
if coord_._as_defn() == defn]
if contains_dimension is not None:
coords_and_factories = [coord_ for coord_ in coords_and_factories
if contains_dimension in
self.coord_dims(coord_)]
if dimensions is not None:
if not isinstance(dimensions, collections.Container):
dimensions = [dimensions]
dimensions = tuple(dimensions)
coords_and_factories = [coord_ for coord_ in coords_and_factories
if self.coord_dims(coord_) == dimensions]
# If any factories remain after the above filters we have to make the
# coords so they can be returned
def extract_coord(coord_or_factory):
if isinstance(coord_or_factory, iris.aux_factory.AuxCoordFactory):
coord = coord_or_factory.make_coord(self.coord_dims)
elif isinstance(coord_or_factory, iris.coords.Coord):
coord = coord_or_factory
else:
msg = 'Expected Coord or AuxCoordFactory, got ' \
'{!r}.'.format(type(coord_or_factory))
raise ValueError(msg)
return coord
coords = [extract_coord(coord_or_factory) for coord_or_factory in
coords_and_factories]
return coords
def coord(self, name_or_coord=None, standard_name=None,
long_name=None, var_name=None, attributes=None, axis=None,
contains_dimension=None, dimensions=None, coord=None,
coord_system=None, dim_coords=None, name=None):
"""
Return a single coord given the same arguments as :meth:`Cube.coords`.
.. note::
If the arguments given do not result in precisely 1 coordinate
being matched, an :class:`iris.exceptions.CoordinateNotFoundError`
is raised.
.. seealso::
:meth:`Cube.coords()<iris.cube.Cube.coords>` for full keyword
documentation.
"""
# Handle deprecated kwargs
if name is not None:
name_or_coord = name
warnings.warn('the name kwarg is deprecated and will be removed '
'in a future release. Consider converting '
'existing code to use the name_or_coord '
'kwarg as a replacement.',
stacklevel=2)
if coord is not None:
name_or_coord = coord
warnings.warn('the coord kwarg is deprecated and will be removed '
'in a future release. Consider converting '
'existing code to use the name_or_coord '
'kwarg as a replacement.',
stacklevel=2)
# Finish handling deprecated kwargs
coords = self.coords(name_or_coord=name_or_coord,
standard_name=standard_name,
long_name=long_name, var_name=var_name,
attributes=attributes, axis=axis,
contains_dimension=contains_dimension,
dimensions=dimensions,
coord_system=coord_system,
dim_coords=dim_coords)
if len(coords) > 1:
msg = 'Expected to find exactly 1 coordinate, but found %s. ' \
'They were: %s.' % (len(coords), ', '.join(coord.name() for
coord in coords))
raise iris.exceptions.CoordinateNotFoundError(msg)
elif len(coords) == 0:
bad_name = name or standard_name or long_name or \
(coord and coord.name()) or ''
msg = 'Expected to find exactly 1 %s coordinate, but found ' \
'none.' % bad_name
raise iris.exceptions.CoordinateNotFoundError(msg)
return coords[0]
def coord_system(self, spec=None):
"""
Find the coordinate system of the given type.
If no target coordinate system is provided then find
any available coordinate system.
Kwargs:
* spec:
The name or type of a coordinate system subclass.
E.g. ::
cube.coord_system("GeogCS")
cube.coord_system(iris.coord_systems.GeogCS)
If spec is provided as a type it can be a superclass of
any coordinate system found.
If spec is None, then find any available coordinate
systems within the :class:`iris.cube.Cube`.
Returns:
The :class:`iris.coord_systems.CoordSystem` or None.
"""
if isinstance(spec, six.string_types) or spec is None:
spec_name = spec
else:
msg = "type %s is not a subclass of CoordSystem" % spec
assert issubclass(spec, iris.coord_systems.CoordSystem), msg
spec_name = spec.__name__
# Gather a temporary list of our unique CoordSystems.
coord_systems = ClassDict(iris.coord_systems.CoordSystem)
for coord in self.coords():
if coord.coord_system:
coord_systems.add(coord.coord_system, replace=True)
result = None
if spec_name is None:
for key in sorted(coord_systems.keys()):
result = coord_systems[key]
break
else:
result = coord_systems.get(spec_name)
return result
@property
def cell_methods(self):
"""
Tuple of :class:`iris.coords.CellMethod` representing the processing
done on the phenomenon.
"""
return self._cell_methods
@cell_methods.setter
def cell_methods(self, cell_methods):
self._cell_methods = tuple(cell_methods) if cell_methods else tuple()
@property
def shape(self):
"""The shape of the data of this cube."""
shape = self.lazy_data().shape
return shape
@property
def dtype(self):
"""The :class:`numpy.dtype` of the data of this cube."""
return self.lazy_data().dtype
@property
def ndim(self):
"""The number of dimensions in the data of this cube."""
return len(self.shape)
def lazy_data(self, array=None):
"""
Return a :class:`biggus.Array` representing the
multi-dimensional data of the Cube, and optionally provide a
new array of values.
Accessing this method will never cause the data to be loaded.
Similarly, calling methods on, or indexing, the returned Array
will not cause the Cube to have loaded data.
If the data have already been loaded for the Cube, the returned
Array will be a :class:`biggus.NumpyArrayAdapter` which wraps
the numpy array from `self.data`.
Kwargs:
* array (:class:`biggus.Array` or None):
When this is not None it sets the multi-dimensional data of
the cube to the given value.
Returns:
A :class:`biggus.Array` representing the multi-dimensional
data of the Cube.
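For example (an illustrative sketch)::
lazy = cube.lazy_data()
print(lazy.shape)    # the shape is available without loading the data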
"""
if array is not None:
if not isinstance(array, biggus.Array):
raise TypeError('new values must be a biggus.Array')
if self.shape != array.shape:
# The _ONLY_ data reshape permitted is converting a
# 0-dimensional array into a 1-dimensional array of
# length one.
# i.e. self.shape = () and array.shape == (1,)
if self.shape or array.shape != (1,):
raise ValueError('Require cube data with shape %r, got '
'%r.' % (self.shape, array.shape))
self._my_data = array
else:
array = self._my_data
if not isinstance(array, biggus.Array):
array = biggus.NumpyArrayAdapter(array)
return array
@property
def data(self):
"""
The :class:`numpy.ndarray` representing the multi-dimensional data of
the cube.
.. note::
Cubes obtained from netCDF, PP, and FieldsFile files will only
populate this attribute on its first use.
To obtain the shape of the data without causing it to be loaded,
use the Cube.shape attribute.
Example::
>>> fname = iris.sample_data_path('air_temp.pp')
>>> cube = iris.load_cube(fname, 'air_temperature')
>>> # cube.data does not yet have a value.
...
>>> print(cube.shape)
(73, 96)
>>> # cube.data still does not have a value.
...
>>> cube = cube[:10, :20]
>>> # cube.data still does not have a value.
...
>>> data = cube.data
>>> # Only now is the data loaded.
...
>>> print(data.shape)
(10, 20)
"""
data = self._my_data
if not isinstance(data, np.ndarray):
try:
data = data.masked_array()
except MemoryError:
msg = "Failed to create the cube's data as there was not" \
" enough memory available.\n" \
"The array shape would have been {0!r} and the data" \
" type {1}.\n" \
"Consider freeing up variables or indexing the cube" \
" before getting its data."
msg = msg.format(self.shape, data.dtype)
raise MemoryError(msg)
# Unmask the array only if it is filled.
if ma.count_masked(data) == 0:
data = data.data
self._my_data = data
return data
@data.setter
def data(self, value):
data = np.asanyarray(value)
if self.shape != data.shape:
# The _ONLY_ data reshape permitted is converting a 0-dimensional
# array i.e. self.shape == () into a 1-dimensional array of length
# one i.e. data.shape == (1,)
if self.shape or data.shape != (1,):
raise ValueError('Require cube data with shape %r, got '
'%r.' % (self.shape, data.shape))
self._my_data = data
def has_lazy_data(self):
return isinstance(self._my_data, biggus.Array)
@property
def dim_coords(self):
"""
Return a tuple of all the dimension coordinates, ordered by dimension.
.. note::
The length of the returned tuple is not necessarily the same as
:attr:`Cube.ndim` as there may be dimensions on the cube without
dimension coordinates. It is therefore unreliable to use the
resulting tuple to identify the dimension coordinates for a given
dimension - instead use the :meth:`Cube.coord` method with the
``dimensions`` and ``dim_coords`` keyword arguments.
"""
return tuple((coord for coord, dim in
sorted(self._dim_coords_and_dims,
key=lambda co_di: (co_di[1], co_di[0].name()))))
@property
def aux_coords(self):
"""
Return a tuple of all the auxiliary coordinates, ordered by
dimension(s).
"""
return tuple((coord for coord, dims in
sorted(self._aux_coords_and_dims,
key=lambda co_di: (co_di[1], co_di[0].name()))))
@property
def derived_coords(self):
"""
Return a tuple of all the coordinates generated by the coordinate
factories.
"""
return tuple(factory.make_coord(self.coord_dims) for factory in
sorted(self.aux_factories,
key=lambda factory: factory.name()))
@property
def aux_factories(self):
"""Return a tuple of all the coordinate factories."""
return tuple(self._aux_factories)
def _summary_coord_extra(self, coord, indent):
# Returns the text needed to ensure this coordinate can be
# distinguished from all others with the same name.
extra = ''
similar_coords = self.coords(coord.name())
if len(similar_coords) > 1:
# Find all the attribute keys
keys = set()
for similar_coord in similar_coords:
keys.update(six.iterkeys(similar_coord.attributes))
# Look for any attributes that vary
vary = set()
attributes = {}
for key in keys:
for similar_coord in similar_coords:
if key not in similar_coord.attributes:
vary.add(key)
break
value = similar_coord.attributes[key]
if attributes.setdefault(key, value) != value:
vary.add(key)
break
keys = sorted(vary & set(coord.attributes.keys()))
bits = ['{}={!r}'.format(key, coord.attributes[key]) for key in
keys]
if bits:
extra = indent + ', '.join(bits)
return extra
def _summary_extra(self, coords, summary, indent):
# Where necessary, inserts extra lines into the summary to ensure
# coordinates can be distinguished.
new_summary = []
for coord, summary in zip(coords, summary):
new_summary.append(summary)
extra = self._summary_coord_extra(coord, indent)
if extra:
new_summary.append(extra)
return new_summary
def summary(self, shorten=False, name_padding=35):
"""
Unicode string summary of the Cube, giving its name, a list of dim coord
names versus length, and optionally relevant coordinate information.
"""
# Create a set to contain the axis names for each data dimension.
dim_names = [set() for dim in range(len(self.shape))]
# Add the dim_coord names that participate in the associated data
# dimensions.
for dim in range(len(self.shape)):
dim_coords = self.coords(contains_dimension=dim, dim_coords=True)
if dim_coords:
dim_names[dim].add(dim_coords[0].name())
else:
dim_names[dim].add('-- ')
# Convert axes sets to lists and sort.
dim_names = [sorted(names, key=sorted_axes) for names in dim_names]
# Generate textual summary of the cube dimensionality.
if self.shape == ():
dimension_header = 'scalar cube'
else:
dimension_header = '; '.join(
[', '.join(dim_names[dim]) +
': %d' % dim_shape for dim, dim_shape in
enumerate(self.shape)])
nameunit = '{name} / ({units})'.format(name=self.name(),
units=self.units)
cube_header = '{nameunit!s:{length}} ({dimension})'.format(
length=name_padding,
nameunit=nameunit,
dimension=dimension_header)
summary = ''
# Generate full cube textual summary.
if not shorten:
indent = 10
extra_indent = ' ' * 13
# Cache the derived coords so we can rely on consistent
# object IDs.
derived_coords = self.derived_coords
# Determine the cube coordinates that are scalar (single-valued)
# AND non-dimensioned.
dim_coords = self.dim_coords
aux_coords = self.aux_coords
all_coords = dim_coords + aux_coords + derived_coords
scalar_coords = [coord for coord in all_coords if not
self.coord_dims(coord) and coord.shape == (1,)]
# Determine the cube coordinates that are not scalar BUT
# dimensioned.
scalar_coord_ids = set(map(id, scalar_coords))
vector_dim_coords = [coord for coord in dim_coords if id(coord) not
in scalar_coord_ids]
vector_aux_coords = [coord for coord in aux_coords if id(coord) not
in scalar_coord_ids]
vector_derived_coords = [coord for coord in derived_coords if
id(coord) not in scalar_coord_ids]
# Determine the cube coordinates that don't describe the cube and
# are most likely erroneous.
vector_coords = vector_dim_coords + vector_aux_coords + \
vector_derived_coords
ok_coord_ids = scalar_coord_ids.union(set(map(id, vector_coords)))
invalid_coords = [coord for coord in all_coords if id(coord) not
in ok_coord_ids]
# Sort scalar coordinates by name.
scalar_coords.sort(key=lambda coord: coord.name())
# Sort vector coordinates by data dimension and name.
vector_dim_coords.sort(
key=lambda coord: (self.coord_dims(coord), coord.name()))
vector_aux_coords.sort(
key=lambda coord: (self.coord_dims(coord), coord.name()))
vector_derived_coords.sort(
key=lambda coord: (self.coord_dims(coord), coord.name()))
# Sort other coordinates by name.
invalid_coords.sort(key=lambda coord: coord.name())
#
# Generate textual summary of cube vector coordinates.
#
def vector_summary(vector_coords, cube_header, max_line_offset):
"""
Generates a list of suitably aligned strings containing coord
names and dimensions indicated by one or more 'x' symbols.
.. note::
The function may need to update the cube header so this is
returned with the list of strings.
"""
vector_summary = []
if vector_coords:
# Identify offsets for each dimension text marker.
alignment = np.array([index for index, value in
enumerate(cube_header) if
value == ':'])
# Generate basic textual summary for each vector coordinate
# - WITHOUT dimension markers.
for coord in vector_coords:
vector_summary.append('%*s%s' % (
indent, ' ', iris.util.clip_string(coord.name())))
min_alignment = min(alignment)
# Determine whether the cube header requires realignment
# due to one or more longer vector coordinate summaries.
if max_line_offset >= min_alignment:
delta = max_line_offset - min_alignment + 5
cube_header = '%-*s (%s)' % (int(name_padding + delta),
self.name() or 'unknown',
dimension_header)
alignment += delta
# Generate full textual summary for each vector coordinate
# - WITH dimension markers.
for index, coord in enumerate(vector_coords):
dims = self.coord_dims(coord)
for dim in range(len(self.shape)):
width = alignment[dim] - len(vector_summary[index])
char = 'x' if dim in dims else '-'
line = '{pad:{width}}{char}'.format(pad=' ',
width=width,
char=char)
vector_summary[index] += line
# Interleave any extra lines that are needed to distinguish
# the coordinates.
vector_summary = self._summary_extra(vector_coords,
vector_summary,
extra_indent)
return vector_summary, cube_header
# Calculate the maximum line offset.
max_line_offset = 0
for coord in all_coords:
max_line_offset = max(max_line_offset, len('%*s%s' % (
indent, ' ', iris.util.clip_string(str(coord.name())))))
if vector_dim_coords:
dim_coord_summary, cube_header = vector_summary(
vector_dim_coords, cube_header, max_line_offset)
summary += '\n Dimension coordinates:\n' + \
'\n'.join(dim_coord_summary)
if vector_aux_coords:
aux_coord_summary, cube_header = vector_summary(
vector_aux_coords, cube_header, max_line_offset)
summary += '\n Auxiliary coordinates:\n' + \
'\n'.join(aux_coord_summary)
if vector_derived_coords:
derived_coord_summary, cube_header = vector_summary(
vector_derived_coords, cube_header, max_line_offset)
summary += '\n Derived coordinates:\n' + \
'\n'.join(derived_coord_summary)
#
# Generate textual summary of cube scalar coordinates.
#
scalar_summary = []
if scalar_coords:
for coord in scalar_coords:
if (coord.units in ['1', 'no_unit', 'unknown'] or
coord.units.is_time_reference()):
unit = ''
else:
unit = ' {!s}'.format(coord.units)
# Format cell depending on type of point and whether it
# has a bound
with iris.FUTURE.context(cell_datetime_objects=False):
coord_cell = coord.cell(0)
if isinstance(coord_cell.point, six.string_types):
# Indent string type coordinates
coord_cell_split = [iris.util.clip_string(str(item))
for item in
coord_cell.point.split('\n')]
line_sep = '\n{pad:{width}}'.format(
pad=' ', width=indent + len(coord.name()) + 2)
coord_cell_str = line_sep.join(coord_cell_split) + unit
else:
# Human readable times
if coord.units.is_time_reference():
coord_cell_cpoint = coord.units.num2date(
coord_cell.point)
if coord_cell.bound is not None:
coord_cell_cbound = coord.units.num2date(
coord_cell.bound)
else:
coord_cell_cpoint = coord_cell.point
coord_cell_cbound = coord_cell.bound
coord_cell_str = '{!s}{}'.format(coord_cell_cpoint,
unit)
if coord_cell.bound is not None:
bound = '({})'.format(', '.join(str(val) for
val in coord_cell_cbound))
coord_cell_str += ', bound={}{}'.format(bound,
unit)
scalar_summary.append('{pad:{width}}{name}: {cell}'.format(
pad=' ', width=indent, name=coord.name(),
cell=coord_cell_str))
# Interleave any extra lines that are needed to distinguish
# the coordinates.
scalar_summary = self._summary_extra(scalar_coords,
scalar_summary,
extra_indent)
summary += '\n Scalar coordinates:\n' + '\n'.join(
scalar_summary)
#
# Generate summary of cube's invalid coordinates.
#
if invalid_coords:
invalid_summary = []
for coord in invalid_coords:
invalid_summary.append(
'%*s%s' % (indent, ' ', coord.name()))
# Interleave any extra lines that are needed to distinguish the
# coordinates.
invalid_summary = self._summary_extra(
invalid_coords, invalid_summary, extra_indent)
summary += '\n Invalid coordinates:\n' + \
'\n'.join(invalid_summary)
#
# Generate summary of cube attributes.
#
if self.attributes:
attribute_lines = []
for name, value in sorted(six.iteritems(self.attributes)):
value = iris.util.clip_string(six.text_type(value))
line = u'{pad:{width}}{name}: {value}'.format(pad=' ',
width=indent,
name=name,
value=value)
attribute_lines.append(line)
summary += '\n Attributes:\n' + '\n'.join(attribute_lines)
#
# Generate summary of cube cell methods
#
if self.cell_methods:
summary += '\n Cell methods:\n'
cm_lines = []
for cm in self.cell_methods:
cm_lines.append('%*s%s' % (indent, ' ', str(cm)))
summary += '\n'.join(cm_lines)
# Construct the final cube summary.
summary = cube_header + summary
return summary
def assert_valid(self):
"""
Does nothing and returns None.
.. deprecated:: 0.8
"""
warnings.warn('Cube.assert_valid() has been deprecated.')
def __str__(self):
# six has a decorator for this bit, but it doesn't do errors='replace'.
if six.PY3:
return self.summary()
else:
return self.summary().encode(errors='replace')
def __unicode__(self):
return self.summary()
def __repr__(self):
return "<iris 'Cube' of %s>" % self.summary(shorten=True,
name_padding=1)
def __iter__(self):
raise TypeError('Cube is not iterable')
def __getitem__(self, keys):
"""
Cube indexing (through use of square bracket notation) has been
implemented at the data level. That is, the indices provided to this
method should be aligned to the data of the cube, and thus the indices
requested must be applicable directly to the cube.data attribute. All
metadata will be subsequently indexed appropriately.
"""
# turn the keys into a full slice spec (all dims)
full_slice = iris.util._build_full_slice_given_keys(keys,
len(self.shape))
# make indexing on the cube column based by using the
# column_slices_generator (potentially requires slicing the data
# multiple times)
dimension_mapping, slice_gen = iris.util.column_slices_generator(
full_slice, len(self.shape))
new_coord_dims = lambda coord_: [dimension_mapping[d] for d in
self.coord_dims(coord_) if
dimension_mapping[d] is not None]
try:
first_slice = next(slice_gen)
except StopIteration:
first_slice = None
if first_slice is not None:
data = self._my_data[first_slice]
else:
data = copy.deepcopy(self._my_data)
for other_slice in slice_gen:
data = data[other_slice]
# We don't want a view of the data, so take a copy of it if it's
# not already our own.
if isinstance(data, biggus.Array) or not data.flags['OWNDATA']:
data = copy.deepcopy(data)
        # We can turn a masked array into a normal array if no points are masked.
if isinstance(data, ma.core.MaskedArray):
if ma.count_masked(data) == 0:
data = data.filled()
# Make the new cube slice
cube = Cube(data)
cube.metadata = copy.deepcopy(self.metadata)
# Record a mapping from old coordinate IDs to new coordinates,
# for subsequent use in creating updated aux_factories.
coord_mapping = {}
# Slice the coords
for coord in self.aux_coords:
coord_keys = tuple([full_slice[dim] for dim in
self.coord_dims(coord)])
try:
new_coord = coord[coord_keys]
except ValueError:
# TODO make this except more specific to catch monotonic error
# Attempt to slice it by converting to AuxCoord first
new_coord = iris.coords.AuxCoord.from_coord(coord)[coord_keys]
cube.add_aux_coord(new_coord, new_coord_dims(coord))
coord_mapping[id(coord)] = new_coord
for coord in self.dim_coords:
coord_keys = tuple([full_slice[dim] for dim in
self.coord_dims(coord)])
new_dims = new_coord_dims(coord)
# Try/Catch to handle slicing that makes the points/bounds
# non-monotonic
try:
new_coord = coord[coord_keys]
if not new_dims:
# If the associated dimension has been sliced so the coord
                    # is a scalar, move the coord to the aux_coords container
cube.add_aux_coord(new_coord, new_dims)
else:
cube.add_dim_coord(new_coord, new_dims)
except ValueError:
# TODO make this except more specific to catch monotonic error
# Attempt to slice it by converting to AuxCoord first
new_coord = iris.coords.AuxCoord.from_coord(coord)[coord_keys]
cube.add_aux_coord(new_coord, new_dims)
coord_mapping[id(coord)] = new_coord
for factory in self.aux_factories:
cube.add_aux_factory(factory.updated(coord_mapping))
return cube
def subset(self, coord):
"""
Get a subset of the cube by providing the desired resultant coordinate.
"""
if not isinstance(coord, iris.coords.Coord):
raise ValueError('coord_to_extract must be a valid Coord.')
# Get the coord to extract from the cube
coord_to_extract = self.coord(coord)
if len(self.coord_dims(coord_to_extract)) > 1:
msg = "Currently, only 1D coords can be used to subset a cube"
raise iris.exceptions.CoordinateMultiDimError(msg)
# Identify the dimension of the cube which this coordinate references
coord_to_extract_dim = self.coord_dims(coord_to_extract)[0]
# Identify the indices which intersect the requested coord and
# coord_to_extract
coordinate_indices = coord_to_extract.intersect(coord,
return_indices=True)
# Build up a slice which spans the whole of the cube
full_slice = [slice(None, None)] * len(self.shape)
# Update the full slice to only extract specific indices which were
# identified above
full_slice[coord_to_extract_dim] = coordinate_indices
full_slice = tuple(full_slice)
return self[full_slice]
def extract(self, constraint):
"""
Filter the cube by the given constraint using
:meth:`iris.Constraint.extract` method.
"""
# Cast the constraint into a proper constraint if it is not so already
constraint = iris._constraints.as_constraint(constraint)
return constraint.extract(self)
def intersection(self, *args, **kwargs):
"""
Return the intersection of the cube with specified coordinate
ranges.
Coordinate ranges can be specified as:
(a) instances of :class:`iris.coords.CoordExtent`.
(b) keyword arguments, where the keyword name specifies the name
of the coordinate (as defined in :meth:`iris.cube.Cube.coords()`)
and the value defines the corresponding range of coordinate
values as a tuple. The tuple must contain two, three, or four
items corresponding to: (minimum, maximum, min_inclusive,
max_inclusive). Where the items are defined as:
* minimum
The minimum value of the range to select.
* maximum
The maximum value of the range to select.
* min_inclusive
If True, coordinate values equal to `minimum` will be included
in the selection. Default is True.
* max_inclusive
If True, coordinate values equal to `maximum` will be included
in the selection. Default is True.
To perform an intersection that ignores any bounds on the coordinates,
set the optional keyword argument *ignore_bounds* to True. Defaults to
False.
.. note::
For ranges defined over "circular" coordinates (i.e. those
where the `units` attribute has a modulus defined) the cube
will be "rolled" to fit where neccesary.
.. warning::
Currently this routine only works with "circular"
            coordinates (as defined in the previous note).
For example::
>>> import iris
>>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
>>> print(cube.coord('longitude').points[::10])
[ 0. 37.49999237 74.99998474 112.49996948 \
149.99996948
187.49995422 224.99993896 262.49993896 299.99993896 \
337.49990845]
>>> subset = cube.intersection(longitude=(30, 50))
>>> print(subset.coord('longitude').points)
[ 33.74999237 37.49999237 41.24998856 44.99998856 48.74998856]
>>> subset = cube.intersection(longitude=(-10, 10))
>>> print(subset.coord('longitude').points)
[-7.50012207 -3.75012207 0. 3.75 7.5 ]
Returns:
A new :class:`~iris.cube.Cube` giving the subset of the cube
which intersects with the requested coordinate intervals.
"""
result = self
ignore_bounds = kwargs.pop('ignore_bounds', False)
for arg in args:
result = result._intersect(*arg, ignore_bounds=ignore_bounds)
for name, value in six.iteritems(kwargs):
result = result._intersect(name, *value,
ignore_bounds=ignore_bounds)
return result
def _intersect(self, name_or_coord, minimum, maximum,
min_inclusive=True, max_inclusive=True,
ignore_bounds=False):
coord = self.coord(name_or_coord)
if coord.ndim != 1:
raise iris.exceptions.CoordinateMultiDimError(coord)
if coord.nbounds not in (0, 2):
raise ValueError('expected 0 or 2 bound values per cell')
if minimum > maximum:
raise ValueError('minimum greater than maximum')
modulus = coord.units.modulus
if modulus is None:
raise ValueError('coordinate units with no modulus are not yet'
' supported')
subsets, points, bounds = self._intersect_modulus(coord,
minimum, maximum,
min_inclusive,
max_inclusive,
ignore_bounds)
# By this point we have either one or two subsets along the relevant
# dimension. If it's just one subset (which might be a slice or an
# unordered collection of indices) we can simply index the cube
# and we're done. If it's two subsets we need to stitch the two
# pieces together.
# subsets provides a way of slicing the coordinates to ensure that
# they remain contiguous. In doing so, this can mean
# transforming the data (this stitching together of two separate
# pieces).
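        # For example, a request such as longitude=(-10, 10) on a 0..360
        # grid typically produces two subsets (the block just below 360 and
        # the block from 0 upwards), which are stitched back together along
        # the intersection dimension below.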
def make_chunk(key):
chunk = self[key_tuple_prefix + (key,)]
chunk_coord = chunk.coord(coord)
chunk_coord.points = points[(key,)]
if chunk_coord.has_bounds():
chunk_coord.bounds = bounds[(key,)]
return chunk
dim, = self.coord_dims(coord)
key_tuple_prefix = (slice(None),) * dim
chunks = [make_chunk(key) for key in subsets]
if len(chunks) == 1:
result = chunks[0]
else:
if self.has_lazy_data():
data = biggus.LinearMosaic([chunk.lazy_data()
for chunk in chunks],
dim)
else:
module = ma if ma.isMaskedArray(self.data) else np
data = module.concatenate([chunk.data for chunk in chunks],
dim)
result = iris.cube.Cube(data)
result.metadata = copy.deepcopy(self.metadata)
# Record a mapping from old coordinate IDs to new coordinates,
# for subsequent use in creating updated aux_factories.
coord_mapping = {}
def create_coords(src_coords, add_coord):
# Add copies of the source coordinates, selecting
# the appropriate subsets out of coordinates which
# share the intersection dimension.
preserve_circular = (min_inclusive and max_inclusive and
abs(maximum - minimum) == modulus)
for src_coord in src_coords:
dims = self.coord_dims(src_coord)
if dim in dims:
dim_within_coord = dims.index(dim)
points = np.concatenate([chunk.coord(src_coord).points
for chunk in chunks],
dim_within_coord)
if src_coord.has_bounds():
bounds = np.concatenate(
[chunk.coord(src_coord).bounds
for chunk in chunks],
dim_within_coord)
else:
bounds = None
result_coord = src_coord.copy(points=points,
bounds=bounds)
circular = getattr(result_coord, 'circular', False)
if circular and not preserve_circular:
result_coord.circular = False
else:
result_coord = src_coord.copy()
add_coord(result_coord, dims)
coord_mapping[id(src_coord)] = result_coord
create_coords(self.dim_coords, result.add_dim_coord)
create_coords(self.aux_coords, result.add_aux_coord)
for factory in self.aux_factories:
result.add_aux_factory(factory.updated(coord_mapping))
return result
def _intersect_derive_subset(self, coord, points, bounds, inside_indices):
# Return the subsets, i.e. the means to allow the slicing of
# coordinates to ensure that they remain contiguous.
modulus = coord.units.modulus
delta = coord.points[inside_indices] - points[inside_indices]
step = np.rint(np.diff(delta) / modulus)
non_zero_step_indices = np.nonzero(step)[0]
def dim_coord_subset():
"""
Derive the subset for dimension coordinates.
Ensure that we do not wrap if blocks are at the very edge. That
is, if the very edge is wrapped and corresponds to base + period,
stop this unnecessary wraparound.
"""
# A contiguous block at the start and another at the end.
# (NB. We can't have more than two blocks because we've already
# restricted the coordinate's range to its modulus).
end_of_first_chunk = non_zero_step_indices[0]
index_of_second_chunk = inside_indices[end_of_first_chunk + 1]
final_index = points.size - 1
# Condition1: The two blocks don't themselves wrap
# (inside_indices is contiguous).
# Condition2: Are we chunked at either extreme edge.
edge_wrap = ((index_of_second_chunk ==
inside_indices[end_of_first_chunk] + 1) and
index_of_second_chunk in (final_index, 1))
subsets = None
if edge_wrap:
# Increasing coord
if coord.points[-1] > coord.points[0]:
index_end = -1
index_start = 0
# Decreasing coord
else:
index_end = 0
index_start = -1
# Unwrap points and bounds (if present and equal base + period)
if bounds is not None:
edge_equal_base_period = (
np.isclose(coord.bounds[index_end, index_end],
coord.bounds[index_start, index_start] +
modulus))
if edge_equal_base_period:
bounds[index_end, :] = coord.bounds[index_end, :]
else:
edge_equal_base_period = (
np.isclose(coord.points[index_end],
coord.points[index_start] +
modulus))
if edge_equal_base_period:
points[index_end] = coord.points[index_end]
subsets = [slice(inside_indices[0],
inside_indices[-1] + 1)]
# Either no edge wrap or edge wrap != base + period
# i.e. derive subset without alteration
if subsets is None:
subsets = [
slice(index_of_second_chunk, None),
slice(None, inside_indices[end_of_first_chunk] + 1)
]
return subsets
if isinstance(coord, iris.coords.DimCoord):
if non_zero_step_indices.size:
| subsets = dim_coord_subset() | 8,031 | lcc_e | python | null | fc73c9efea98568ceb6be090daf2a83234c653b1c0aa5303 |
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Bibauthorid Web Interface Logic and URL handler. """
# pylint: disable=W0105
# pylint: disable=C0301
# pylint: disable=W0613
from cgi import escape
from pprint import pformat
from operator import itemgetter
import re
import urllib
try:
from invenio.jsonutils import json, json_unicode_to_utf8, CFG_JSON_AVAILABLE
except ImportError:
CFG_JSON_AVAILABLE = False
json = None
from invenio.bibauthorid_webapi import add_cname_to_hepname_record
from invenio.bibauthorid_webapi import create_new_person
from invenio.config import CFG_SITE_URL, CFG_BASE_URL
from invenio.bibauthorid_config import AID_ENABLED, PERSON_SEARCH_RESULTS_SHOW_PAPERS_PERSON_LIMIT, \
BIBAUTHORID_UI_SKIP_ARXIV_STUB_PAGE, VALID_EXPORT_FILTERS, PERSONS_PER_PAGE, \
MAX_NUM_SHOW_PAPERS, BIBAUTHORID_CFG_SITE_NAME, CFG_BIBAUTHORID_ENABLED
from invenio.config import CFG_SITE_LANG, CFG_SITE_URL, CFG_INSPIRE_SITE, CFG_SITE_SECURE_URL
from invenio.bibauthorid_name_utils import most_relevant_name, clean_string
from invenio.webpage import page, pageheaderonly, pagefooteronly
from invenio.messages import gettext_set_language # , wash_language
from invenio.template import load
from invenio.webinterface_handler import wash_urlargd, WebInterfaceDirectory
from invenio.session import get_session
from invenio.urlutils import redirect_to_url, get_canonical_and_alternates_urls
from invenio.webuser import (getUid,
page_not_authorized,
collect_user_info,
set_user_preferences,
get_user_preferences,
email_valid_p,
emailUnique,
get_email_from_username,
get_uid_from_email,
isGuestUser)
from invenio.access_control_admin import acc_get_user_roles
from invenio.search_engine import perform_request_search
from invenio.search_engine_utils import get_fieldvalues
from invenio.bibauthorid_config import CREATE_NEW_PERSON
from invenio.bibsched import bibsched_task_finished_successfully, \
bibsched_task_finished_with_error, bibsched_task_running, bibsched_task_waiting, \
UnknownBibschedStatus
import invenio.webinterface_handler_config as apache
import invenio.webauthorprofile_interface as webauthorapi
import invenio.bibauthorid_webapi as webapi
from invenio.bibauthorid_general_utils import get_title_of_arxiv_pubid, is_valid_orcid
from invenio.bibauthorid_backinterface import update_external_ids_of_authors, get_orcid_id_of_author, \
get_validated_request_tickets_for_author, get_title_of_paper, get_claimed_papers_of_author, \
get_free_author_id
from invenio.bibauthorid_dbinterface import defaultdict, remove_arxiv_papers_of_author, \
get_author_by_canonical_name, get_token, set_token, remove_rtid_from_ticket
from invenio.orcidutils import get_dois_from_orcid, get_dois_from_orcid_using_pid
from invenio.bibauthorid_webauthorprofileinterface import is_valid_canonical_id, get_person_id_from_canonical_id, \
get_person_redirect_link, author_has_papers
from invenio.bibauthorid_templates import WebProfileMenu, WebProfilePage
from invenio.bibauthorid_general_utils import get_inspire_record_url
from invenio.bibcatalog import BIBCATALOG_SYSTEM
# Imports related to hepnames update form
from invenio.bibedit_utils import get_bibrecord
from invenio.bibrecord import record_get_field_value, record_get_field_values, \
record_get_field_instances, field_get_subfield_values
from invenio.bibauthorid_name_utils import split_name_parts
from invenio.orcidutils import push_orcid_papers
TEMPLATE = load('bibauthorid')
class WebInterfaceBibAuthorIDClaimPages(WebInterfaceDirectory):
'''
Handles /author/claim pages and AJAX requests.
Supplies the methods:
/author/claim/<string>
/author/claim/action
/author/claim/claimstub
/author/claim/export
/author/claim/merge_profiles_ajax
/author/claim/search_box_ajax
/author/claim/tickets_admin
/author/claim/search
'''
_exports = ['',
'action',
'claimstub',
'export',
'merge_profiles_ajax',
'search_box_ajax',
'tickets_admin'
]
def _lookup(self, component, path):
'''
This handler parses dynamic URLs:
        - /author/claim/1332 shows the page of author with id: 1332
        - /author/claim/100:5522,1431 shows the page of the author
identified by the bibrefrec: '100:5522,1431'
'''
if not component in self._exports:
return WebInterfaceBibAuthorIDClaimPages(component), path
def _is_profile_owner(self, pid):
return self.person_id == int(pid)
def _is_admin(self, pinfo):
return pinfo['ulevel'] == 'admin'
def __init__(self, identifier=None):
'''
Constructor of the web interface.
@param identifier: identifier of an author. Can be one of:
- an author id: e.g. "14"
- a canonical id: e.g. "J.R.Ellis.1"
- a bibrefrec: e.g. "100:1442,155"
@type identifier: str
'''
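        # Illustrative instantiations (identifiers taken from the examples
        # in the docstring above):
        #     WebInterfaceBibAuthorIDClaimPages('14')            # author id
        #     WebInterfaceBibAuthorIDClaimPages('J.R.Ellis.1')   # canonical id
        #     WebInterfaceBibAuthorIDClaimPages('100:1442,155')  # bibrefrec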
self.person_id = -1 # -1 is a non valid author identifier
if identifier is None or not isinstance(identifier, str):
return
# check if it's a canonical id: e.g. "J.R.Ellis.1"
pid = int(webapi.get_person_id_from_canonical_id(identifier))
if pid >= 0:
self.person_id = pid
return
# check if it's an author id: e.g. "14"
try:
self.person_id = int(identifier)
return
except ValueError:
pass
# check if it's a bibrefrec: e.g. "100:1442,155"
if webapi.is_valid_bibref(identifier):
pid = int(webapi.get_person_id_from_paper(identifier))
if pid >= 0:
self.person_id = pid
return
def __call__(self, req, form):
'''
Serve the main person page.
Will use the object's person id to get a person's information.
@param req: apache request object
@type req: apache request object
@param form: POST/GET variables of the request
@type form: dict
@return: a full page formatted in HTML
@rtype: str
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ulevel = pinfo['ulevel']
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'open_claim': (str, None),
'ticketid': (int, -1),
'verbose': (int, 0)})
debug = "verbose" in argd and argd["verbose"] > 0
ln = argd['ln']
req.argd = argd # needed for perform_req_search
if self.person_id < 0:
return redirect_to_url(req, '%s/author/search' % (CFG_SITE_URL))
no_access = self._page_access_permission_wall(req, [self.person_id])
if no_access:
return no_access
pinfo['claim_in_process'] = True
user_info = collect_user_info(req)
user_info['precached_viewclaimlink'] = pinfo['claim_in_process']
session.dirty = True
if self.person_id != -1:
pinfo['claimpaper_admin_last_viewed_pid'] = self.person_id
rt_ticket_id = argd['ticketid']
if rt_ticket_id != -1:
pinfo["admin_requested_ticket_id"] = rt_ticket_id
session.dirty = True
# Create menu and page using templates
cname = webapi.get_canonical_id_from_person_id(self.person_id)
menu = WebProfileMenu(str(cname), "claim", ln, self._is_profile_owner(pinfo['pid']), self._is_admin(pinfo))
profile_page = WebProfilePage("claim", webapi.get_longest_name_from_pid(self.person_id))
profile_page.add_profile_menu(menu)
full_name = webapi.get_longest_name_from_pid(self.person_id)
page_title = '%s - Publications Management' % full_name
guest_prompt = 'true'
if not CFG_INSPIRE_SITE:
guest_prompt = 'false'
if 'prompt_shown' not in session:
session['prompt_shown'] = False
if session['prompt_shown']:
guest_prompt = 'false'
else:
session['prompt_shown'] = True
session.dirty = True
profile_page.add_bootstrapped_data(json.dumps({
"backbone": """
(function(ticketbox) {
var app = ticketbox.app;
app.userops.set(%s);
app.bodyModel.set({userLevel: "%s", guestPrompt: %s});
})(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel, guest_prompt)
}))
if debug:
profile_page.add_debug_info(session)
# body = self._generate_optional_menu(ulevel, req, form)
content = self._generate_tabs(ulevel, req)
content += self._generate_footer(ulevel)
content = content.decode('utf-8', 'strict')
webapi.history_log_visit(req, 'claim', pid=self.person_id)
return page(title=page_title,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=profile_page.get_wrapped_body("generic", {'html': content}).encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def _page_access_permission_wall(self, req, req_pid=None, req_level=None):
'''
Display an error page if user not authorized to use the interface.
@param req: Apache Request Object for session management
@type req: Apache Request Object
@param req_pid: Requested person id
@type req_pid: int
@param req_level: Request level required for the page
@type req_level: string
'''
session = get_session(req)
uid = getUid(req)
pinfo = session["personinfo"]
uinfo = collect_user_info(req)
if 'ln' in pinfo:
ln = pinfo["ln"]
else:
ln = CFG_SITE_LANG
_ = gettext_set_language(ln)
is_authorized = True
pids_to_check = []
if not AID_ENABLED:
return page_not_authorized(req, text=_("Fatal: Author ID capabilities are disabled on this system."))
if req_level and 'ulevel' in pinfo and pinfo["ulevel"] != req_level:
return page_not_authorized(req, text=_("Fatal: You are not allowed to access this functionality."))
if req_pid and not isinstance(req_pid, list):
pids_to_check = [req_pid]
elif req_pid and isinstance(req_pid, list):
pids_to_check = req_pid
if (not (uinfo['precached_usepaperclaim']
or uinfo['precached_usepaperattribution'])
and 'ulevel' in pinfo
and not pinfo["ulevel"] == "admin"):
is_authorized = False
if is_authorized and not webapi.user_can_view_CMP(uid):
is_authorized = False
if is_authorized and 'ticket' in pinfo:
for tic in pinfo["ticket"]:
if 'pid' in tic:
pids_to_check.append(tic['pid'])
if pids_to_check and is_authorized:
user_pid = webapi.get_pid_from_uid(uid)
if not uinfo['precached_usepaperattribution']:
if (not user_pid in pids_to_check
and 'ulevel' in pinfo
and not pinfo["ulevel"] == "admin"):
is_authorized = False
elif (user_pid in pids_to_check
and 'ulevel' in pinfo
and not pinfo["ulevel"] == "admin"):
for tic in list(pinfo["ticket"]):
if not tic["pid"] == user_pid:
pinfo['ticket'].remove(tic)
if not is_authorized:
return page_not_authorized(req, text=_("Fatal: You are not allowed to access this functionality."))
else:
return ""
def _generate_title(self, ulevel):
'''
Generates the title for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@return: title
@rtype: str
'''
def generate_title_guest():
title = 'Assign papers'
if self.person_id:
title = 'Assign papers for: ' + str(webapi.get_person_redirect_link(self.person_id))
return title
def generate_title_user():
title = 'Assign papers'
if self.person_id:
title = 'Assign papers (user interface) for: ' + str(webapi.get_person_redirect_link(self.person_id))
return title
def generate_title_admin():
title = 'Assign papers'
if self.person_id:
title = 'Assign papers (administrator interface) for: ' + str(
webapi.get_person_redirect_link(self.person_id))
return title
generate_title = {'guest': generate_title_guest,
'user': generate_title_user,
'admin': generate_title_admin}
return generate_title[ulevel]()
def _generate_tabs(self, ulevel, req):
'''
Generates the tabs content for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@param req: apache request object
@type req: apache request object
@return: tabs content
@rtype: str
'''
from invenio.bibauthorid_templates import verbiage_dict as tmpl_verbiage_dict
from invenio.bibauthorid_templates import buttons_verbiage_dict as tmpl_buttons_verbiage_dict
def generate_tabs_guest(req):
links = list() # ['delete', 'commit','del_entry','commit_entry']
tabs = ['records', 'repealed', 'review']
return generate_tabs_admin(req, show_tabs=tabs, ticket_links=links,
open_tickets=list(),
verbiage_dict=tmpl_verbiage_dict['guest'],
buttons_verbiage_dict=tmpl_buttons_verbiage_dict['guest'],
show_reset_button=False)
def generate_tabs_user(req):
links = ['delete', 'del_entry']
tabs = ['records', 'repealed', 'review', 'tickets']
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
user_is_owner = 'not_owner'
if pinfo["claimpaper_admin_last_viewed_pid"] == webapi.get_pid_from_uid(uid):
user_is_owner = 'owner'
open_tickets = webapi.get_person_request_ticket(self.person_id)
tickets = list()
for t in open_tickets:
owns = False
for row in t[0]:
if row[0] == 'uid-ip' and row[1].split('||')[0] == str(uid):
owns = True
if owns:
tickets.append(t)
return generate_tabs_admin(req, show_tabs=tabs, ticket_links=links,
open_tickets=tickets,
verbiage_dict=tmpl_verbiage_dict['user'][user_is_owner],
buttons_verbiage_dict=tmpl_buttons_verbiage_dict['user'][user_is_owner])
def generate_tabs_admin(req, show_tabs=['records', 'repealed', 'review', 'comments', 'tickets', 'data'],
ticket_links=['delete', 'commit', 'del_entry', 'commit_entry'], open_tickets=None,
verbiage_dict=None, buttons_verbiage_dict=None, show_reset_button=True):
session = get_session(req)
personinfo = dict()
try:
personinfo = session["personinfo"]
except KeyError:
return ""
if 'ln' in personinfo:
ln = personinfo["ln"]
else:
ln = CFG_SITE_LANG
all_papers = webapi.get_papers_by_person_id(self.person_id, ext_out=True)
records = [{'recid': paper[0],
'bibref': paper[1],
'flag': paper[2],
'authorname': paper[3],
'authoraffiliation': paper[4],
'paperdate': paper[5],
'rt_status': paper[6],
'paperexperiment': paper[7]} for paper in all_papers]
rejected_papers = [row for row in records if row['flag'] < -1]
rest_of_papers = [row for row in records if row['flag'] >= -1]
review_needed = webapi.get_review_needing_records(self.person_id)
if len(review_needed) < 1:
if 'review' in show_tabs:
show_tabs.remove('review')
if open_tickets is None:
open_tickets = webapi.get_person_request_ticket(self.person_id)
else:
if len(open_tickets) < 1 and 'tickets' in show_tabs:
show_tabs.remove('tickets')
rt_tickets = None
if "admin_requested_ticket_id" in personinfo:
rt_tickets = personinfo["admin_requested_ticket_id"]
if verbiage_dict is None:
verbiage_dict = translate_dict_values(tmpl_verbiage_dict['admin'], ln)
if buttons_verbiage_dict is None:
buttons_verbiage_dict = translate_dict_values(tmpl_buttons_verbiage_dict['admin'], ln)
# send data to the template function
tabs = TEMPLATE.tmpl_admin_tabs(ln, person_id=self.person_id,
rejected_papers=rejected_papers,
rest_of_papers=rest_of_papers,
review_needed=review_needed,
rt_tickets=rt_tickets,
open_rt_tickets=open_tickets,
show_tabs=show_tabs,
ticket_links=ticket_links,
verbiage_dict=verbiage_dict,
buttons_verbiage_dict=buttons_verbiage_dict,
show_reset_button=show_reset_button)
return tabs
def translate_dict_values(dictionary, ln):
def translate_str_values(dictionary, f=lambda x: x):
translated_dict = dict()
for key, value in dictionary.iteritems():
if isinstance(value, str):
translated_dict[key] = f(value)
elif isinstance(value, dict):
translated_dict[key] = translate_str_values(value, f)
else:
raise TypeError("Value should be either string or dictionary.")
return translated_dict
return translate_str_values(dictionary, f=gettext_set_language(ln))
generate_tabs = {'guest': generate_tabs_guest,
'user': generate_tabs_user,
'admin': generate_tabs_admin}
return generate_tabs[ulevel](req)
def _generate_footer(self, ulevel):
'''
Generates the footer for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@return: footer
@rtype: str
'''
def generate_footer_guest():
return TEMPLATE.tmpl_invenio_search_box()
def generate_footer_user():
return generate_footer_guest()
def generate_footer_admin():
return generate_footer_guest()
generate_footer = {'guest': generate_footer_guest,
'user': generate_footer_user,
'admin': generate_footer_admin}
return generate_footer[ulevel]()
def _ticket_dispatch_end(self, req):
'''
        The ticket dispatch is finished: redirect to the page of origin or to
        the last viewed pid, or return to the auto-assigned papers box so it can populate its data.
'''
session = get_session(req)
pinfo = session["personinfo"]
webapi.session_bareinit(req)
if 'claim_in_process' in pinfo:
pinfo['claim_in_process'] = False
if "merge_ticket" in pinfo and pinfo['merge_ticket']:
pinfo['merge_ticket'] = []
user_info = collect_user_info(req)
user_info['precached_viewclaimlink'] = True
session.dirty = True
if "referer" in pinfo and pinfo["referer"]:
referer = pinfo["referer"]
del(pinfo["referer"])
session.dirty = True
return redirect_to_url(req, referer)
        # If we are coming from the autoclaim box we should not redirect, just return to the caller function.
if 'autoclaim' in pinfo and pinfo['autoclaim']['review_failed'] == False and pinfo['autoclaim']['begin_autoclaim'] == True:
pinfo['autoclaim']['review_failed'] = False
pinfo['autoclaim']['begin_autoclaim'] = False
session.dirty = True
else:
redirect_page = webapi.history_get_last_visited_url(
pinfo['visit_diary'], limit_to_page=['manage_profile', 'claim'])
if not redirect_page:
redirect_page = webapi.get_fallback_redirect_link(req)
if 'autoclaim' in pinfo and pinfo['autoclaim']['review_failed'] and pinfo['autoclaim']['checkout']:
redirect_page = '%s/author/claim/action?checkout=True' % (CFG_SITE_URL,)
pinfo['autoclaim']['checkout'] = False
session.dirty = True
elif not 'manage_profile' in redirect_page:
pinfo['autoclaim']['review_failed'] = False
                pinfo['autoclaim']['begin_autoclaim'] = False
pinfo['autoclaim']['checkout'] = True
session.dirty = True
redirect_page = '%s/author/claim/%s?open_claim=True' % (
CFG_SITE_URL,
webapi.get_person_redirect_link(pinfo["claimpaper_admin_last_viewed_pid"]))
else:
pinfo['autoclaim']['review_failed'] = False
                pinfo['autoclaim']['begin_autoclaim'] = False
pinfo['autoclaim']['checkout'] = True
session.dirty = True
return redirect_to_url(req, redirect_page)
# redirect_link = diary('get_redirect_link', caller='_ticket_dispatch_end', parameters=[('open_claim','True')])
# return redirect_to_url(req, redirect_link)
def _check_user_fields(self, req, form):
argd = wash_urlargd(
form,
{'ln': (str, CFG_SITE_LANG),
'user_first_name': (str, None),
'user_last_name': (str, None),
'user_email': (str, None),
'user_comments': (str, None)})
session = get_session(req)
pinfo = session["personinfo"]
ulevel = pinfo["ulevel"]
skip_checkout_faulty_fields = False
if ulevel in ['user', 'admin']:
skip_checkout_faulty_fields = True
if not ("user_first_name_sys" in pinfo and pinfo["user_first_name_sys"]):
if "user_first_name" in argd and argd['user_first_name']:
if not argd["user_first_name"] and not skip_checkout_faulty_fields:
pinfo["checkout_faulty_fields"].append("user_first_name")
else:
pinfo["user_first_name"] = escape(argd["user_first_name"])
if not ("user_last_name_sys" in pinfo and pinfo["user_last_name_sys"]):
if "user_last_name" in argd and argd['user_last_name']:
if not argd["user_last_name"] and not skip_checkout_faulty_fields:
pinfo["checkout_faulty_fields"].append("user_last_name")
else:
pinfo["user_last_name"] = escape(argd["user_last_name"])
if not ("user_email_sys" in pinfo and pinfo["user_email_sys"]):
if "user_email" in argd and argd['user_email']:
if not email_valid_p(argd["user_email"]):
pinfo["checkout_faulty_fields"].append("user_email")
else:
pinfo["user_email"] = escape(argd["user_email"])
if (ulevel == "guest"
and emailUnique(argd["user_email"]) > 0):
pinfo["checkout_faulty_fields"].append("user_email_taken")
else:
pinfo["checkout_faulty_fields"].append("user_email")
if "user_comments" in argd:
if argd["user_comments"]:
pinfo["user_ticket_comments"] = escape(argd["user_comments"])
else:
pinfo["user_ticket_comments"] = ""
session.dirty = True
def action(self, req, form):
'''
Initial step in processing of requests: ticket generation/update.
Also acts as action dispatcher for interface mass action requests.
Valid mass actions are:
- add_external_id: add an external identifier to an author
- add_missing_external_ids: add missing external identifiers of an author
- bibref_check_submit:
- cancel: clean the session (erase tickets and so on)
- cancel_rt_ticket:
- cancel_search_ticket:
- cancel_stage:
- checkout:
- checkout_continue_claiming:
- checkout_remove_transaction:
- checkout_submit:
- claim: claim papers for an author
- commit_rt_ticket:
- confirm: confirm assignments to an author
- delete_external_ids: delete external identifiers of an author
- repeal: repeal assignments from an author
- reset: reset assignments of an author
- set_canonical_name: set/swap the canonical name of an author
- to_other_person: assign a document from an author to another author
@param req: apache request object
@type req: apache request object
@param form: parameters sent via GET or POST request
@type form: dict
@return: a full page formatted in HTML
@return: str
'''
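        # Illustrative request (parameter values are examples only): posting
        # confirm=True&pid=14&selection=100:1442,155 to /author/claim/action
        # selects the 'confirm' branch below, which adds an 'assign'
        # operation for the selected paper to the user's ticket.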
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session["personinfo"]
argd = wash_urlargd(form,
{'autoclaim_show_review': (str, None),
'canonical_name': (str, None),
'existing_ext_ids': (list, None),
'ext_id': (str, None),
'uid': (int, None),
'ext_system': (str, None),
'ln': (str, CFG_SITE_LANG),
'pid': (int, -1),
'primary_profile': (str, None),
'search_param': (str, None),
'rt_action': (str, None),
'rt_id': (int, None),
'selection': (list, None),
'rtid': (int, None),
# permitted actions
'add_external_id': (str, None),
'set_uid': (str, None),
'add_missing_external_ids': (str, None),
'associate_profile': (str, None),
'bibref_check_submit': (str, None),
'cancel': (str, None),
'cancel_merging': (str, None),
'cancel_rt_ticket': (str, None),
'cancel_search_ticket': (str, None),
'cancel_stage': (str, None),
'checkout': (str, None),
'checkout_continue_claiming': (str, None),
'checkout_remove_transaction': (str, None),
'checkout_submit': (str, None),
'assign': (str, None),
'commit_rt_ticket': (str, None),
'close_rt_ticket': (str, None),
'confirm': (str, None),
'delete_external_ids': (str, None),
'email': (str, None),
'merge': (str, None),
'reject': (str, None),
'repeal': (str, None),
'reset': (str, None),
'send_message': (str, None),
'set_canonical_name': (str, None),
'to_other_person': (str, None)})
ulevel = pinfo["ulevel"]
ticket = pinfo["ticket"]
uid = getUid(req)
ln = argd['ln']
action = None
permitted_actions = ['add_external_id',
'set_uid',
'add_missing_external_ids',
'associate_profile',
'bibref_check_submit',
'cancel',
'cancel_merging',
'cancel_rt_ticket',
'cancel_search_ticket',
'cancel_stage',
'checkout',
'checkout_continue_claiming',
'checkout_remove_transaction',
'checkout_submit',
'assign',
'close_rt_ticket',
'commit_rt_ticket',
'confirm',
'delete_external_ids',
'merge',
'reject',
'repeal',
'reset',
'send_message',
'set_canonical_name',
'to_other_person']
for act in permitted_actions:
# one action (the most) is enabled in the form
if argd[act] is not None:
action = act
no_access = self._page_access_permission_wall(req, None)
if no_access and action not in ["assign"]:
return no_access
        # Incomplete papers (incomplete paper info or other problems) trigger the action function without the
        # user's intervention, in order to fix those problems and claim papers or remove them from the ticket.
if (action is None
and "bibref_check_required" in pinfo
and pinfo["bibref_check_required"]):
if "bibref_check_reviewed_bibrefs" in pinfo:
del(pinfo["bibref_check_reviewed_bibrefs"])
session.dirty = True
def add_external_id():
'''
associates the user with pid to the external id ext_id
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot add external id to unknown person")
if argd['ext_system']:
ext_sys = argd['ext_system']
else:
return self._error_page(req, ln,
"Fatal: cannot add an external id without specifying the system")
if argd['ext_id']:
ext_id = argd['ext_id']
else:
return self._error_page(req, ln,
"Fatal: cannot add a custom external id without a suggestion")
userinfo = "%s||%s" % (uid, req.remote_ip)
webapi.add_person_external_id(pid, ext_sys, ext_id, userinfo)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, urllib.quote(webapi.get_person_redirect_link(pid))))
def set_uid():
'''
            associates the profile with pid to the user identified by uid
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: current user is unknown")
if argd['uid'] is not None:
dest_uid = int(argd['uid'])
else:
return self._error_page(req, ln,
"Fatal: user id is not valid")
userinfo = "%s||%s" % (uid, req.remote_ip)
webapi.set_person_uid(pid, dest_uid, userinfo)
# remove arxiv pubs of current pid
remove_arxiv_papers_of_author(pid)
dest_uid_pid = webapi.get_pid_from_uid(dest_uid)
if dest_uid_pid > -1:
# move the arxiv pubs of the dest_uid to the current pid
dest_uid_arxiv_papers = webapi.get_arxiv_papers_of_author(dest_uid_pid)
webapi.add_arxiv_papers_to_author(dest_uid_arxiv_papers, pid)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, urllib.quote(webapi.get_person_redirect_link(pid))))
def add_missing_external_ids():
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot recompute external ids for an unknown person")
update_external_ids_of_authors([pid], overwrite=False)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, urllib.quote(webapi.get_person_redirect_link(pid))))
def associate_profile():
'''
associates the user with user id to the person profile with pid
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot associate profile without a person id.")
uid = getUid(req)
pid, profile_claimed = webapi.claim_profile(uid, pid)
redirect_pid = pid
if profile_claimed:
pinfo['pid'] = pid
pinfo['should_check_to_autoclaim'] = True
pinfo["login_info_message"] = "confirm_success"
session.dirty = True
redirect_to_url(req, '%s/author/manage_profile/%s'
% (CFG_SITE_URL, urllib.quote(str(redirect_pid))))
            # If someone has already claimed this profile, redirect to choose_profile with an error message.
else:
param = ''
if 'search_param' in argd and argd['search_param']:
param = '&search_param=' + urllib.quote(argd['search_param'])
redirect_to_url(req, '%s/author/choose_profile?failed=%s%s' % (CFG_SITE_URL, True, param))
def bibref_check_submit():
pinfo["bibref_check_reviewed_bibrefs"] = list()
add_rev = pinfo["bibref_check_reviewed_bibrefs"].append
if ("bibrefs_auto_assigned" in pinfo
or "bibrefs_to_confirm" in pinfo):
person_reviews = list()
if ("bibrefs_auto_assigned" in pinfo
and pinfo["bibrefs_auto_assigned"]):
person_reviews.append(pinfo["bibrefs_auto_assigned"])
if ("bibrefs_to_confirm" in pinfo
and pinfo["bibrefs_to_confirm"]):
person_reviews.append(pinfo["bibrefs_to_confirm"])
for ref_review in person_reviews:
for person_id in ref_review:
for bibrec in ref_review[person_id]["bibrecs"]:
rec_grp = "bibrecgroup%s" % bibrec
elements = list()
if rec_grp in form:
if isinstance(form[rec_grp], str):
elements.append(form[rec_grp])
elif isinstance(form[rec_grp], list):
elements += form[rec_grp]
else:
continue
for element in elements:
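                                    # Each element is expected to look like
                                    # "<pid>||<bibref>" (e.g. "14||100:1442");
                                    # valid entries are stored back as
                                    # "<pid>||<bibref>,<bibrec>".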
test = element.split("||")
if test and len(test) > 1 and test[1]:
tref = test[1] + "," + str(bibrec)
tpid = webapi.wash_integer_id(test[0])
if (webapi.is_valid_bibref(tref)
and tpid > -1):
add_rev(element + "," + str(bibrec))
session.dirty = True
def cancel():
self.__session_cleanup(req)
return self._ticket_dispatch_end(req)
def cancel_merging():
'''
empties the session out of merge content and redirects to the manage profile page
that the user was viewing before the merge
'''
if argd['primary_profile']:
primary_cname = argd['primary_profile']
else:
return self._error_page(req, ln,
"Fatal: Couldn't redirect to the previous page")
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
if pinfo['merge_profiles']:
pinfo['merge_profiles'] = list()
session.dirty = True
redirect_url = "%s/author/manage_profile/%s" % (CFG_SITE_URL, urllib.quote(primary_cname))
return redirect_to_url(req, redirect_url)
def cancel_rt_ticket():
if argd['selection'] is not None:
bibrefrecs = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot cancel unknown ticket")
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln, "Fatal: cannot cancel unknown ticket")
if argd['rt_id'] is not None and argd['rt_action'] is not None:
rt_id = int(argd['rt_id'])
rt_action = argd['rt_action']
for bibrefrec in bibrefrecs:
webapi.delete_transaction_from_request_ticket(pid, rt_id, rt_action, bibrefrec)
else:
rt_id = int(bibrefrecs[0])
webapi.delete_request_ticket(pid, rt_id)
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, urllib.quote(str(pid))))
def cancel_search_ticket(without_return=False):
if 'search_ticket' in pinfo:
del(pinfo['search_ticket'])
session.dirty = True
if "claimpaper_admin_last_viewed_pid" in pinfo:
pid = pinfo["claimpaper_admin_last_viewed_pid"]
if not without_return:
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, urllib.quote(webapi.get_person_redirect_link(pid))))
if not without_return:
return self.search(req, form)
def cancel_stage():
if 'bibref_check_required' in pinfo:
del(pinfo['bibref_check_required'])
if 'bibrefs_auto_assigned' in pinfo:
del(pinfo['bibrefs_auto_assigned'])
if 'bibrefs_to_confirm' in pinfo:
del(pinfo['bibrefs_to_confirm'])
for tt in [row for row in ticket if 'incomplete' in row]:
ticket.remove(tt)
session.dirty = True
return self._ticket_dispatch_end(req)
def checkout():
pass
# return self._ticket_final_review(req)
def checkout_continue_claiming():
pinfo["checkout_faulty_fields"] = list()
self._check_user_fields(req, form)
return self._ticket_dispatch_end(req)
def checkout_remove_transaction():
bibref = argd['checkout_remove_transaction']
if webapi.is_valid_bibref(bibref):
for rmt in [row for row in ticket if row["bibref"] == bibref]:
ticket.remove(rmt)
pinfo["checkout_confirmed"] = False
session.dirty = True
# return self._ticket_final_review(req)
def checkout_submit():
pinfo["checkout_faulty_fields"] = list()
self._check_user_fields(req, form)
if not ticket:
pinfo["checkout_faulty_fields"].append("tickets")
pinfo["checkout_confirmed"] = True
if pinfo["checkout_faulty_fields"]:
pinfo["checkout_confirmed"] = False
session.dirty = True
# return self._ticket_final_review(req)
def claim():
if argd['selection'] is not None:
bibrefrecs = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot create ticket without any papers selected. " + \
"Please go back and select which papers would you like to claim.")
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot claim papers to an unknown person")
if action == 'assign':
claimed_recs = [paper[2] for paper in get_claimed_papers_of_author(pid)]
for bibrefrec in list(bibrefrecs):
_, rec = webapi.split_bibrefrec(bibrefrec)
if rec in claimed_recs:
bibrefrecs.remove(bibrefrec)
for bibrefrec in bibrefrecs:
operation_parts = {'pid': pid,
'action': action,
'bibrefrec': bibrefrec}
operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
if operation_to_be_added is None:
continue
ticket = pinfo['ticket']
webapi.add_operation_to_ticket(operation_to_be_added, ticket)
session.dirty = True
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, urllib.quote(webapi.get_person_redirect_link(pid))))
def claim_to_other_person():
if argd['selection'] is not None:
bibrefrecs = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot create ticket without any papers selected. " + \
"Please go back and select which papers would you like to claim.")
return self._ticket_open_assign_to_other_person(req, bibrefrecs, form)
def commit_rt_ticket():
if argd['selection'] is not None:
tid = argd['selection'][0]
else:
return self._error_page(req, ln,
"Fatal: cannot cancel unknown ticket")
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot cancel unknown ticket")
return self._commit_rt_ticket(req, tid, pid)
def confirm_repeal_reset():
if argd['pid'] > -1 or int(argd['pid']) == CREATE_NEW_PERSON:
pid = argd['pid']
cancel_search_ticket(without_return=True)
else:
return self._ticket_open_assign_to_other_person(req, argd['selection'], form)
# return self._error_page(req, ln, "Fatal: cannot create ticket without a
# person id! (crr %s)" %repr(argd))
bibrefrecs = argd['selection']
if argd['confirm']:
action = 'assign'
if pid == CREATE_NEW_PERSON:
pid = create_new_person(getUid(req))
elif argd['repeal']:
action = 'reject'
elif argd['reset']:
action = 'reset'
else:
return self._error_page(req, ln, "Fatal: not existent action!")
for bibrefrec in bibrefrecs:
form['jsondata'] = json.dumps({'pid': str(pid),
'action': action,
'bibrefrec': bibrefrec,
'on': 'user'})
t = WebInterfaceAuthorTicketHandling()
t.add_operation(req, form)
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, urllib.quote(webapi.get_person_redirect_link(pid))))
def close_rt_ticket():
BIBCATALOG_SYSTEM.ticket_set_attribute(0, argd['rtid'], 'status', 'resolved')
remove_rtid_from_ticket(argd['rtid'], argd['pid'])
return redirect_to_url(req, "%s/author/claim/%s#tabTickets" % (CFG_SITE_URL, urllib.quote(webapi.get_person_redirect_link(argd['pid']))))
def delete_external_ids():
'''
deletes association between the user with pid and the external id ext_id
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot delete external ids from an unknown person")
if argd['existing_ext_ids'] is not None:
existing_ext_ids = argd['existing_ext_ids']
else:
return self._error_page(req, ln,
"Fatal: you must select at least one external id in order to delete it")
userinfo = "%s||%s" % (uid, req.remote_ip)
webapi.delete_person_external_ids(pid, existing_ext_ids, userinfo)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, urllib.quote(webapi.get_person_redirect_link(pid))))
def none_action():
return self._error_page(req, ln,
"Fatal: cannot create ticket if no action selected.")
def merge():
'''
performs a merge if allowed on the profiles that the user chose
'''
if argd['primary_profile']:
primary_cname = argd['primary_profile']
else:
return self._error_page(req, ln,
"Fatal: cannot perform a merge without a primary profile!")
if argd['selection']:
profiles_to_merge = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot perform a merge without any profiles selected!")
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
primary_pid = webapi.get_person_id_from_canonical_id(primary_cname)
pids_to_merge = [webapi.get_person_id_from_canonical_id(cname) for cname in profiles_to_merge]
is_admin = False
if pinfo['ulevel'] == 'admin':
is_admin = True
# checking if there are restrictions regarding this merge
can_perform_merge, preventing_pid, error_message = webapi.merge_is_allowed(primary_pid, pids_to_merge, is_admin)
if not can_perform_merge:
# when redirected back to the merge profiles page display an error message
# about the currently attempted merge
session.dirty = True
req.status = apache.HTTP_CONFLICT
c_name = webapi.get_canonical_id_from_person_id(preventing_pid)
return 'Cannot merge profile: %s Reason: %s' % (c_name,
error_message)
if is_admin:
webapi.merge_profiles(primary_pid, pids_to_merge)
else:
name = ''
if 'user_last_name' in pinfo:
name = pinfo['user_last_name']
if 'user_first_name' in pinfo:
name += pinfo['user_first_name']
email = ''
if 'user_email' in pinfo:
email = pinfo['user_email']
elif 'email' in argd:
# the email was submitted in form
email = argd['email']
pinfo['form_email'] = email
selection_str = "&selection=".join(profiles_to_merge)
userinfo = {'uid-ip': "userid: %s (from %s)" % (uid, req.remote_ip),
'name': name,
'email': email,
'merge link': "%s/author/merge_profiles?primary_profile=%s&selection=%s" % (CFG_SITE_URL, primary_cname, selection_str),
'uid': uid}
# a message is sent to the admin with info regarding the currently attempted merge
webapi.create_request_message(userinfo, subj=('Merge profiles request: %s' % primary_cname))
# when redirected back to the manage profile page display a message about the merge
pinfo['merge_info_message'] = ("success", "confirm_operation")
pinfo['merge_profiles'] = list()
session.dirty = True
redirect_url = "%s/author/manage_profile/%s" % (CFG_SITE_URL, urllib.quote(primary_cname))
return redirect_to_url(req, redirect_url)
def send_message():
'''
sends a message from the user to the admin
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
# pp = pprint.PrettyPrinter(indent=4)
# session_dump = pp.pprint(pinfo)
session_dump = str(pinfo)
name = ''
name_changed = False
name_given = ''
email = ''
email_changed = False
email_given = ''
comment = ''
last_page_visited = ''
if "user_last_name" in pinfo:
name = pinfo["user_last_name"]
if "user_first_name" in pinfo:
name += pinfo["user_first_name"]
name = name.rstrip()
if "user_email" in pinfo:
email = pinfo["user_email"]
email = email.rstrip()
if 'Name' in form:
if not name:
name = form['Name']
elif name != form['Name']:
name_given = form['Name']
name_changed = True
name = name.rstrip()
            if 'E-mail' in form:
if not email:
email = form['E-mail']
                elif email != form['E-mail']:
email_given = form['E-mail']
email_changed = True
email = email.rstrip()
if 'Comment' in form:
comment = form['Comment']
comment = comment.rstrip()
if not name or not comment or not email:
redirect_to_url(req, '%s/author/help?incomplete_params=%s' % (CFG_SITE_URL, True))
if 'last_page_visited' in form:
last_page_visited = form['last_page_visited']
uid = getUid(req)
userinfo = {'uid-ip': "userid: %s (from %s)" % (uid, req.remote_ip),
'name': name,
'email': email,
'comment': comment,
'last_page_visited': last_page_visited,
'session_dump': session_dump,
'name_given': name_given,
'email_given': email_given,
'name_changed': name_changed,
'email_changed': email_changed,
'uid': uid}
webapi.create_request_message(userinfo)
def set_canonical_name():
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot set canonical name to unknown person")
if argd['canonical_name'] is not None:
cname = argd['canonical_name']
else:
return self._error_page(req, ln,
"Fatal: cannot set a custom canonical name without a suggestion")
userinfo = "%s||%s" % (uid, req.remote_ip)
if webapi.is_valid_canonical_id(cname):
webapi.swap_person_canonical_name(pid, cname, userinfo)
else:
webapi.update_person_canonical_name(pid, cname, userinfo)
return redirect_to_url(req, "%s/author/claim/%s%s" % (CFG_SITE_URL, urllib.quote(webapi.get_person_redirect_link(pid)), '#tabData'))
action_functions = {'add_external_id': add_external_id,
'set_uid': set_uid,
'add_missing_external_ids': add_missing_external_ids,
'associate_profile': associate_profile,
'bibref_check_submit': bibref_check_submit,
'cancel': cancel,
'cancel_merging': cancel_merging,
'cancel_rt_ticket': cancel_rt_ticket,
'cancel_search_ticket': cancel_search_ticket,
'cancel_stage': cancel_stage,
'checkout': checkout,
'checkout_continue_claiming': checkout_continue_claiming,
'checkout_remove_transaction': checkout_remove_transaction,
'checkout_submit': checkout_submit,
'assign': claim,
'commit_rt_ticket': commit_rt_ticket,
'close_rt_ticket': close_rt_ticket,
'confirm': confirm_repeal_reset,
'delete_external_ids': delete_external_ids,
'merge': merge,
'reject': claim,
'repeal': confirm_repeal_reset,
'reset': confirm_repeal_reset,
'send_message': send_message,
'set_canonical_name': set_canonical_name,
'to_other_person': claim_to_other_person,
None: none_action}
return action_functions[action]()
def _ticket_open_assign_to_other_person(self, req, bibrefs, form):
'''
Initializes search to find a person to attach the selected records to
@param req: Apache request object
@type req: Apache request object
@param bibrefs: list of record IDs to consider
@type bibrefs: list of int
@param form: GET/POST request parameters
@type form: dict
'''
session = get_session(req)
pinfo = session["personinfo"]
pinfo["search_ticket"] = dict()
search_ticket = pinfo["search_ticket"]
search_ticket['action'] = 'assign'
search_ticket['bibrefs'] = bibrefs
session.dirty = True
return self.search(req, form)
def _cancel_rt_ticket(self, req, tid, pid):
'''
deletes an RT ticket
'''
webapi.delete_request_ticket(pid, tid)
return redirect_to_url(req, "%s/author/claim/%s" %
(CFG_SITE_URL, urllib.quote(webapi.get_person_redirect_link(str(pid)))))
def _cancel_transaction_from_rt_ticket(self, tid, pid, action, bibref):
'''
deletes a transaction from an rt ticket
'''
webapi.delete_transaction_from_request_ticket(pid, tid, action, bibref)
def _commit_rt_ticket(self, req, tid, pid):
'''
Commit of an rt ticket: creates a real ticket and commits.
'''
session = get_session(req)
pinfo = session["personinfo"]
ticket = pinfo["ticket"]
uid = getUid(req)
tid = int(tid)
try:
rt_ticket = get_validated_request_tickets_for_author(pid, tid)[0]
except IndexError:
msg = """This ticket with the tid: %s has already been
removed.""" % tid
return self._error_page(req, message=msg)
for action, bibrefrec in rt_ticket['operations']:
operation_parts = {'pid': pid,
'action': action,
'bibrefrec': bibrefrec}
operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
webapi.add_operation_to_ticket(operation_to_be_added, ticket)
session.dirty = True
webapi.delete_request_ticket(pid, tid)
redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, urllib.quote(str(pid))))
def _error_page(self, req, ln=CFG_SITE_LANG, message=None, intro=True):
'''
Create a page that contains a message explaining the error.
@param req: Apache Request Object
@type req: Apache Request Object
@param ln: language
@type ln: string
@param message: message to be displayed
@type message: string
'''
body = []
_ = gettext_set_language(ln)
if not message:
message = "No further explanation available. Sorry."
if intro:
body.append(_("<p>We're sorry. An error occurred while "
"handling your request. Please find more information "
"below:</p>"))
body.append("<p><strong>%s</strong></p>" % message)
return page(title=_("Notice"),
body="\n".join(body),
description="%s - Internal Error" % BIBAUTHORID_CFG_SITE_NAME,
keywords="%s, Internal Error" % BIBAUTHORID_CFG_SITE_NAME,
language=ln,
req=req)
def __session_cleanup(self, req):
'''
Cleans the session from all bibauthorid specific settings and
with that cancels any transaction currently in progress.
@param req: Apache Request Object
@type req: Apache Request Object
'''
session = get_session(req)
try:
pinfo = session["personinfo"]
except KeyError:
return
if "ticket" in pinfo:
pinfo['ticket'] = []
if "search_ticket" in pinfo:
pinfo['search_ticket'] = dict()
# Clean up the bibref checker data once the check is complete.
if ("bibref_check_required" in pinfo
and not pinfo["bibref_check_required"]):
if 'bibrefs_to_confirm' in pinfo:
del(pinfo['bibrefs_to_confirm'])
if "bibrefs_auto_assigned" in pinfo:
del(pinfo["bibrefs_auto_assigned"])
del(pinfo["bibref_check_required"])
if "checkout_confirmed" in pinfo:
del(pinfo["checkout_confirmed"])
if "checkout_faulty_fields" in pinfo:
del(pinfo["checkout_faulty_fields"])
# pinfo['ulevel'] = ulevel
# pinfo["claimpaper_admin_last_viewed_pid"] = -1
pinfo["admin_requested_ticket_id"] = -1
session.dirty = True
def _generate_search_ticket_box(self, req):
'''
Generates the search ticket box that reminds the user of a pending
search for Person entities during an attribution process
@param req: Apache request object
@type req: Apache request object
'''
session = get_session(req)
pinfo = session["personinfo"]
search_ticket = None
if 'search_ticket' in pinfo:
search_ticket = pinfo['search_ticket']
if not search_ticket:
return ''
else:
return TEMPLATE.tmpl_search_ticket_box('person_search', 'assign_papers', search_ticket['bibrefs'])
def search_box(self, query, shown_element_functions):
'''
Collects the data of the persons returned by the search and renders the result list.
@param query: the query string
@type query: string
@param shown_element_functions: functions that tell the template which columns to show and which buttons to render
@type shown_element_functions: dict
@return: html body
@rtype: string
'''
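# Keys used for shown_element_functions elsewhere in this module include
# 'show_search_bar', 'button_gen', 'new_person_gen', 'show_status' and
# 'pass_status'. Only 'pass_status' is inspected here; the rest are
# consumed by the templates.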
pid_list = self._perform_search(query)
search_results = []
for pid in pid_list:
result = defaultdict(list)
result['pid'] = pid
result['canonical_id'] = webapi.get_canonical_id_from_person_id(pid)
result['name_variants'] = webapi.get_person_names_from_id(pid)
result['external_ids'] = webapi.get_external_ids_from_person_id(pid)
# Only compute the profile availability status if the search template actually uses it.
if 'pass_status' in shown_element_functions and shown_element_functions['pass_status']:
result['status'] = webapi.is_profile_available(pid)
search_results.append(result)
body = TEMPLATE.tmpl_author_search(query, search_results, shown_element_functions)
body = TEMPLATE.tmpl_person_detail_layout(body)
return body
def search(self, req, form):
'''
Searches for a person based on the name with which the function is queried.
@param req: Apache Request Object
@type req: Apache Request Object
@param form: GET/POST request parameters
@type form: dict
@return: a full page formatted in HTML
@rtype: string
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ulevel = pinfo['ulevel']
person_id = self.person_id
uid = getUid(req)
argd = wash_urlargd(
form,
{'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0),
'q': (str, None)})
debug = "verbose" in argd and argd["verbose"] > 0
ln = argd['ln']
cname = ''
is_owner = False
last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])
if last_visited_pid is not None:
cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
try:
int(cname)
except ValueError:
is_owner = False
else:
is_owner = self._is_profile_owner(last_visited_pid)
menu = WebProfileMenu(str(cname), "search", ln, is_owner, self._is_admin(pinfo))
title = "Person search"
# Create Wrapper Page Markup
profile_page = WebProfilePage("search", title, no_cache=True)
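# Bootstrap the client-side ticket box: inject the user's pending ticket
# operations and user level into the Backbone app.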
profile_page.add_bootstrapped_data(json.dumps({
"backbone": """
(function(ticketbox) {
var app = ticketbox.app;
app.userops.set(%s);
app.bodyModel.set({userLevel: "%s"});
})(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel)
}))
if debug:
profile_page.add_debug_info(pinfo)
no_access = self._page_access_permission_wall(req)
shown_element_functions = dict()
shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_general_search_bar()
if no_access:
return no_access
search_ticket = None
bibrefs = []
if 'search_ticket' in pinfo:
search_ticket = pinfo['search_ticket']
for r in search_ticket['bibrefs']:
bibrefs.append(r)
if search_ticket and "ulevel" in pinfo:
if pinfo["ulevel"] == "admin":
shown_element_functions['new_person_gen'] = TEMPLATE.tmpl_assigning_search_new_person_generator(bibrefs)
content = ""
if search_ticket:
shown_element_functions['button_gen'] = TEMPLATE.tmpl_assigning_search_button_generator(bibrefs)
content = content + self._generate_search_ticket_box(req)
query = None
if 'q' in argd:
if argd['q']:
query = escape(argd['q'])
content += self.search_box(query, shown_element_functions)
body = profile_page.get_wrapped_body("generic", {'html': content})
parameter = None
if query:
parameter = '?search_param=%s' % query
webapi.history_log_visit(req, 'search', params=parameter)
return page(title=title,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def merge_profiles(self, req, form):
'''
Beginning of the process that performs the merge over multiple person profiles.
@param req: Apache Request Object
@type req: Apache Request Object
@param form: GET/POST request parameters
@type form: dict
@return: a full page formatted in HTML
@rtype: string
'''
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'primary_profile': (str, None),
'search_param': (str, ''),
'selection': (list, None),
'verbose': (int, 0)})
ln = argd['ln']
primary_cname = argd['primary_profile']
search_param = argd['search_param']
selection = argd['selection']
debug = 'verbose' in argd and argd['verbose'] > 0
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
profiles_to_merge = pinfo['merge_profiles']
_ = gettext_set_language(ln)
if not primary_cname:
return page_not_authorized(req, text=_('This page is not accessible directly.'))
no_access = self._page_access_permission_wall(req)
if no_access:
return no_access
if selection is not None:
profiles_to_merge_session = [cname for cname, is_available in profiles_to_merge]
for profile in selection:
if profile not in profiles_to_merge_session:
pid = webapi.get_person_id_from_canonical_id(profile)
is_available = webapi.is_profile_available(pid)
pinfo['merge_profiles'].append([profile, '1' if is_available else '0'])
session.dirty = True
primary_pid = webapi.get_person_id_from_canonical_id(primary_cname)
is_available = webapi.is_profile_available(primary_pid)
if not session['personinfo']['merge_primary_profile']:
session['personinfo']['merge_primary_profile'] = [primary_cname, '1' if is_available else '0']
session.dirty = True
body = ''
cname = ''
is_owner = False
last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])
if last_visited_pid is not None:
cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
is_owner = self._is_profile_owner(last_visited_pid)
title = 'Merge Profiles'
menu = WebProfileMenu(str(cname), "manage_profile", ln, is_owner, self._is_admin(pinfo))
merge_page = WebProfilePage("merge_profile", title, no_cache=True)
merge_page.add_profile_menu(menu)
if debug:
merge_page.add_debug_info(pinfo)
# display status for any previously attempted merge
if pinfo['merge_info_message']:
teaser_key, message = pinfo['merge_info_message']
body += TEMPLATE.tmpl_merge_transaction_box(teaser_key, [message])
pinfo['merge_info_message'] = None
session.dirty = True
body += TEMPLATE.tmpl_merge_ticket_box('person_search', 'merge_profiles', primary_cname)
shown_element_functions = dict()
shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_merge_profiles_search_bar(primary_cname)
shown_element_functions['button_gen'] = TEMPLATE.merge_profiles_button_generator()
shown_element_functions['pass_status'] = 'True'
gFormEmail = ""
if 'form_email' in pinfo:
gFormEmail = pinfo['form_email']
merge_page.add_bootstrapped_data(json.dumps({
"other": ("var gMergeProfile = %s; var gMergeList = %s;" +
"var gUserLevel = '%s'; var gFormEmail = '%s';") %
([primary_cname, '1' if is_available else '0'],
profiles_to_merge, pinfo['ulevel'], gFormEmail)
}))
body += self.search_box(search_param, shown_element_functions)
body = merge_page.get_wrapped_body("generic", {'html': body})
return page(title=title,
metaheaderadd=merge_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def _perform_search(self, search_param):
'''
Calls the search function with search_param and returns the results.
@param search_param: query string
@type search_param: string
@return: list of pids that match the search query
@rtype: list
'''
pid_candidates_list = []
nquery = None
if search_param:
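# Queries of the form 'field:value' are reduced to the part after the
# colon before being passed to the name search; anything that does not
# split cleanly falls back to the raw search string.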
if search_param.count(":"):
try:
left, right = search_param.split(":")
try:
nsearch_param = str(right)
except (ValueError, TypeError):
try:
nsearch_param = str(left)
except (ValueError, TypeError):
nsearch_param = search_param
except ValueError:
nsearch_param = search_param
else:
nsearch_param = search_param
sorted_results = webapi.search_person_ids_by_name(nsearch_param)
for result in sorted_results:
pid_candidates_list.append(result[0])
return pid_candidates_list
def merge_profiles_ajax(self, req, form):
'''
Handles Ajax requests that add/remove profiles to/from the list of
profiles to merge, which is saved in the session.
@param req: Apache Request Object
@type req: Apache Request Object
@param form: Parameters sent via Ajax request
@type form: dict
@return: json data
'''
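# A minimal sketch of the expected payload (derived from the handlers
# below; the example values are purely illustrative):
#   form['jsondata'] is a JSON string such as
#     {"requestType": "addProfile", "profile": "J.R.Ellis.1"}
#   where requestType is one of 'addProfile', 'removeProfile' or
#   'setPrimaryProfile'. The returned JSON string always carries a
#   'resultCode' (1 on success, 0 otherwise).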
# Warn if the simplejson module isn't available; the Ajax request cannot be served properly without it.
if not CFG_JSON_AVAILABLE:
print "Json not configurable"
# If it is an Ajax request, extract any JSON data.
ajax_request = False
if 'jsondata' in form:
json_data = json.loads(str(form['jsondata']))
# Deunicode all strings (Invenio doesn't have unicode
# support).
json_data = json_unicode_to_utf8(json_data)
ajax_request = True
json_response = {'resultCode': 0}
# Handle request.
if ajax_request:
req_type = json_data['requestType']
if req_type == 'addProfile':
if 'profile' in json_data:
profile = json_data['profile']
person_id = webapi.get_person_id_from_canonical_id(profile)
if person_id != -1:
webapi.session_bareinit(req)
session = get_session(req)
profiles_to_merge = session["personinfo"]["merge_profiles"]
profile_availability = webapi.is_profile_available(person_id)
if profile_availability:
profile_availability = "1"
else:
profile_availability = "0"
if profile not in [el[0] for el in profiles_to_merge]:
profiles_to_merge.append([profile, profile_availability])
session.dirty = True
# TODO check access rights and get profile from db
json_response.update({'resultCode': 1})
json_response.update({'addedPofile': profile})
json_response.update({'addedPofileAvailability': profile_availability})
else:
json_response.update({'result': 'Error: Profile does not exist'})
else:
json_response.update({'result': 'Error: Profile was already in the list'})
else:
json_response.update({'result': 'Error: Missing profile'})
elif req_type == 'removeProfile':
if 'profile' in json_data:
profile = json_data['profile']
if webapi.get_person_id_from_canonical_id(profile) != -1:
webapi.session_bareinit(req)
session = get_session(req)
profiles_to_merge = session["personinfo"]["merge_profiles"]
if profile in [el[0] for el in profiles_to_merge]:
for prof in list(profiles_to_merge):
if prof[0] == profile:
profiles_to_merge.remove(prof)
session.dirty = True
# TODO check access rights and get profile from db
json_response.update({'resultCode': 1})
json_response.update({'removedProfile': profile})
else:
json_response.update({'result': 'Error: Profile was already missing from the list'})
else:
json_response.update({'result': 'Error: Profile does not exist'})
else:
json_response.update({'result': 'Error: Missing profile'})
elif req_type == 'setPrimaryProfile':
if 'profile' in json_data:
profile = json_data['profile']
profile_id = webapi.get_person_id_from_canonical_id(profile)
if profile_id != -1:
webapi.session_bareinit(req)
session = get_session(req)
profile_availability = webapi.is_profile_available(profile_id)
if profile_availability:
profile_availability = "1"
else:
profile_availability = "0"
profiles_to_merge = session["personinfo"]["merge_profiles"]
if profile in [el[0] for el in profiles_to_merge if el and el[0]]:
for prof in list(profiles_to_merge):
if prof[0] == profile:
profiles_to_merge.remove(prof)
primary_profile = session["personinfo"]["merge_primary_profile"]
if primary_profile and primary_profile not in profiles_to_merge:
profiles_to_merge.append(primary_profile)
session["personinfo"]["merge_primary_profile"] = [profile, profile_availability]
session.dirty = True
json_response.update({'resultCode': 1})
json_response.update({'primaryProfile': profile})
json_response.update({'primaryPofileAvailability': profile_availability})
else:
json_response.update({'result': 'Error: Profile is not in the merge list'})
else:
json_response.update({'result': 'Error: Missing profile'})
else:
json_response.update({'result': 'Error: Wrong request type'})
return json.dumps(json_response)
def search_box_ajax(self, req, form):
'''
Handles Ajax requests issued by the search box.
@param req: Apache Request Object
@type req: Apache Request Object
@param form: Parameters sent via Ajax request
@type form: dict
@return: json data
'''
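# A minimal sketch of the expected payload (derived from the handlers
# below; the example values are purely illustrative):
#   form['jsondata'] is a JSON string such as
#     {"requestType": "getPapers", "personId": "14"}
#   where requestType is one of 'getPapers', 'getNames', 'getIDs' or
#   'isProfileClaimed'. The returned JSON string always carries a
#   'resultCode' (1 on success, 0 otherwise).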
# Warn if the simplejson module isn't available; the Ajax request cannot be served properly without it.
if not CFG_JSON_AVAILABLE:
print "Json not configurable"
# If it is an Ajax request, extract any JSON data.
ajax_request = False
if 'jsondata' in form:
json_data = json.loads(str(form['jsondata']))
# Deunicode all strings (Invenio doesn't have unicode
# support).
json_data = json_unicode_to_utf8(json_data)
ajax_request = True
json_response = {'resultCode': 0}
# Handle request.
if ajax_request:
req_type = json_data['requestType']
if req_type == 'getPapers':
if 'personId' in json_data:
pId = json_data['personId']
papers = sorted([[p[0]] for p in webapi.get_papers_by_person_id(int(pId), -1)],
key=itemgetter(0))
papers_html = TEMPLATE.tmpl_gen_papers(papers[0:MAX_NUM_SHOW_PAPERS])
json_response.update({'result': "\n".join(papers_html)})
json_response.update({'totalPapers': len(papers)})
json_response.update({'resultCode': 1})
json_response.update({'pid': str(pId)})
else:
json_response.update({'result': 'Error: Missing person id'})
elif req_type == 'getNames':
if 'personId' in json_data:
pId = json_data['personId']
names = webapi.get_person_names_from_id(int(pId))
names_html = TEMPLATE.tmpl_gen_names(names)
json_response.update({'result': "\n".join(names_html)})
json_response.update({'resultCode': 1})
json_response.update({'pid': str(pId)})
elif req_type == 'getIDs':
if 'personId' in json_data:
pId = json_data['personId']
ids = webapi.get_external_ids_from_person_id(int(pId))
ids_html = TEMPLATE.tmpl_gen_ext_ids(ids)
json_response.update({'result': "\n".join(ids_html)})
json_response.update({'resultCode': 1})
json_response.update({'pid': str(pId)})
elif req_type == 'isProfileClaimed':
if 'personId' in json_data:
pId = json_data['personId']
isClaimed = webapi.get_uid_from_personid(pId)
if isClaimed != -1:
json_response.update({'resultCode': 1})
json_response.update({'pid': str(pId)})
else:
json_response.update({'result': 'Error: Wrong request type'})
return json.dumps(json_response)
def choose_profile(self, req, form):
'''
Generate SSO landing/choose_profile page
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'search_param': (str, None),
'failed': (str, None),
'verbose': (int, 0)})
ln = argd['ln']
debug = "verbose" in argd and argd["verbose"] > 0
req.argd = argd # needed for perform_req_search
search_param = argd['search_param']
webapi.session_bareinit(req)
session = get_session(req)
uid = getUid(req)
pinfo = session['personinfo']
failed = True
if not argd['failed']:
failed = False
_ = gettext_set_language(ln)
if not CFG_INSPIRE_SITE:
return page_not_authorized(req, text=_("This page is not accessible directly."))
params = WebInterfaceBibAuthorIDClaimPages.get_params_to_check_login_info(session)
login_info = webapi.get_login_info(uid, params)
if 'arXiv' not in login_info['logged_in_to_remote_systems']:
return page_not_authorized(req, text=_("This page is not accessible directly."))
pid = webapi.get_user_pid(login_info['uid'])
# Create Wrapper Page Markup
is_owner = False
menu = WebProfileMenu('', "choose_profile", ln, is_owner, self._is_admin(pinfo))
choose_page = WebProfilePage("choose_profile", "Choose your profile", no_cache=True)
choose_page.add_profile_menu(menu)
if debug:
choose_page.add_debug_info(pinfo)
content = TEMPLATE.tmpl_choose_profile(failed)
body = choose_page.get_wrapped_body("generic", {'html': content})
# Whenever we pass through here, an autoclaim should be performed right afterwards!
pinfo = session["personinfo"]
pinfo['should_check_to_autoclaim'] = True
session.dirty = True
last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])
# if already logged in then redirect the user to the page he was viewing
if pid != -1:
redirect_pid = pid
if last_visited_pid:
redirect_pid = last_visited_pid
redirect_to_url(req, '%s/author/manage_profile/%s' % (CFG_SITE_URL, urllib.quote(str(redirect_pid))))
else:
# get name strings and email addresses from SSO/Oauth logins:
# {'system':{'name':[variant1,...,variantn], 'email':'blabla@bla.bla',
# 'pants_size':20}}
remote_login_systems_info = webapi.get_remote_login_systems_info(
req, login_info['logged_in_to_remote_systems'])
# get union of recids that are associated to the ids from all the external systems: set(inspire_recids_list)
recids = webapi.get_remote_login_systems_recids(req, login_info['logged_in_to_remote_systems'])
# this is the profile with the biggest intersection of papers so it's
# more probable that this is the profile the user seeks
probable_pid = webapi.match_profile(req, recids, remote_login_systems_info)
# if not search_param and probable_pid > -1 and probable_pid == last_visited_pid:
# try to assign the user to the profile he chose. If for some reason the profile is not available we assign him to an empty profile
# redirect_pid, profile_claimed = webapi.claim_profile(login_info['uid'], probable_pid)
# if profile_claimed:
# redirect_to_url(req,
# '%s/author/claim/action?associate_profile=True&redirect_pid=%s' %
# (CFG_SITE_URL, str(redirect_pid)))
probable_profile_suggestion_info = None
last_viewed_profile_suggestion_info = None
if last_visited_pid > -1 and webapi.is_profile_available(last_visited_pid):
# get information about the last viewed profile and show it to the user
last_viewed_profile_suggestion_info = webapi.get_profile_suggestion_info(req, last_visited_pid, recids)
if probable_pid > -1 and webapi.is_profile_available(probable_pid):
# get information about the most probable profile and show it to the user
probable_profile_suggestion_info = webapi.get_profile_suggestion_info(req, probable_pid, recids)
if not search_param:
# Prefill the search with the most relevant of the names obtained from the external systems.
name_variants = webapi.get_name_variants_list_from_remote_systems_names(remote_login_systems_info)
search_param = most_relevant_name(name_variants)
body = body + TEMPLATE.tmpl_probable_profile_suggestion(
probable_profile_suggestion_info,
last_viewed_profile_suggestion_info,
search_param)
free_id = get_free_author_id()
shown_element_functions = dict()
shown_element_functions['button_gen'] = TEMPLATE.tmpl_choose_profile_search_button_generator()
shown_element_functions['new_person_gen'] = TEMPLATE.tmpl_choose_profile_search_new_person_generator(free_id)
shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_choose_profile_search_bar()
# Show the status column in the templates (whether the profile is bound to a user or not).
shown_element_functions['show_status'] = True
# Pass the status data to the templates even when the column is not shown
# (e.g. needed for merge_profiles).
shown_element_functions['pass_status'] = True
# show search results to the user
body = body + self.search_box(search_param, shown_element_functions)
body = body + TEMPLATE.tmpl_choose_profile_footer()
title = _(' ')
return page(title=title,
metaheaderadd=choose_page.get_head().encode('utf-8'),
body=body,
req=req,
language=ln)
@staticmethod
def _arxiv_box(req, login_info, person_id, user_pid):
'''
Processes and collects data for the arXiv box
@param req: Apache request object
@type req: Apache request object
@param login_info: status of login in the following format: {'logged_in': True, 'uid': 2, 'logged_in_to_remote_systems':['Arxiv', ...]}
@type login_info: dict
@param person_id: person id of the current page's profile
@type person_id: int
@param user_pid: person id of the user
@type user_pid: int
@return: data required to build the arXiv box
@rtype: dict
'''
session = get_session(req)
pinfo = session["personinfo"]
arxiv_data = dict()
# if the user is not a guest and he is connected through arXiv
arxiv_data['login'] = login_info['logged_in']
arxiv_data['user_pid'] = user_pid
arxiv_data['user_has_pid'] = user_pid != -1
# whether the profile the user is logged in with is the same as the profile of the page being viewed
arxiv_data['view_own_profile'] = user_pid == person_id
return arxiv_data
@staticmethod
def _orcid_box(arxiv_logged_in, person_id, user_pid, ulevel):
'''
Processes and collects data for the ORCID box
@param arxiv_logged_in: shows if the user is logged in through arXiv or not
@type arxiv_logged_in: boolean
@param person_id: person id of the current page's profile
@type person_id: int
@param user_pid: person id of the user
@type user_pid: int
@param ulevel: user's level
@type ulevel: string
@return: data required to build the ORCID box
@rtype: dict
'''
orcid_data = dict()
orcid_data['arxiv_login'] = arxiv_logged_in
orcid_data['orcids'] = None
orcid_data['add_power'] = False
orcid_data['own_profile'] = False
orcid_data['pid'] = person_id
# Indicates whether we should push the works or not.
orcid_data['push'] = not get_token(person_id)
# whether the profile the user is logged in with is the same as the profile of the page being viewed
if person_id == user_pid:
orcid_data['own_profile'] = True
# if the user is an admin then he can add an existing orcid to the profile
if ulevel == "admin":
orcid_data['add_power'] = True
orcids = webapi.get_orcids_by_pid(person_id)
if orcids:
orcid_data['orcids'] = orcids
return orcid_data
@staticmethod
def _autoclaim_papers_box(req, person_id, user_pid, remote_logged_in_systems):
'''
Processes and collects data for the autoclaim box
@param req: Apache request object
@type req: Apache request object
@param person_id: person id of the current page's profile
@type person_id: int
@param user_pid: person id of the user
@type user_pid: int
@param remote_logged_in_systems: the remote logged in systems
@type remote_logged_in_systems: list
@return: data required to build the autoclaim box
@rtype: dict
'''
autoclaim_data = dict()
# If no autoclaim should occur, or it has already occurred and its results have been shown, the box remains hidden.
autoclaim_data['hidden'] = True
autoclaim_data['person_id'] = person_id
# whether the profile the user is logged in with is the same as the profile of the page being viewed
if person_id == user_pid:
recids_to_autoclaim = webapi.get_remote_login_systems_recids(req, remote_logged_in_systems)
autoclaim_data['hidden'] = False
autoclaim_data['num_of_claims'] = len(recids_to_autoclaim)
return autoclaim_data
@staticmethod
def get_params_to_check_login_info(session):
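# Build, for every supported remote system (arXiv, ORCID), the parameter
# dict that webapi.get_login_info() uses to decide whether the user is
# logged in to that system.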
def get_params_to_check_login_info_of_arxiv(session):
try:
return session['user_info']
except KeyError:
return None
def get_params_to_check_login_info_of_orcid(session):
pinfo = session['personinfo']
try:
pinfo['orcid']['has_orcid_id'] = bool(
get_orcid_id_of_author(pinfo['pid'])[0][0] and pinfo['orcid']['import_pubs'])
except:
pinfo['orcid']['has_orcid_id'] = False
session.dirty = True
return pinfo['orcid']
get_params_for_remote_system = {'arXiv': get_params_to_check_login_info_of_arxiv,
'orcid': get_params_to_check_login_info_of_orcid}
params = dict()
for system, get_params in get_params_for_remote_system.iteritems():
params[system] = get_params(session)
return params
@staticmethod
def _claim_paper_box(person_id):
'''
Processes and collects data for the claim paper box
@param person_id: person id of the current page's profile
@type person_id: int
@return: data required to build the claim paper box
@rtype: dict
'''
claim_paper_data = dict()
claim_paper_data['canonical_id'] = str(webapi.get_canonical_id_from_person_id(person_id))
return claim_paper_data
@staticmethod
def _support_box():
'''
Processes and collects data for the support box
@return: data required to build the support box
@rtype: dict
'''
support_data = dict()
return support_data
@staticmethod
def _merge_box(person_id):
'''
Processes and collects data for the merge box
@param person_id: person id of the current page's profile
@type person_id: int
@return: data required to build the merge box
@rtype: dict
'''
merge_data = dict()
search_param = webapi.get_canonical_id_from_person_id(person_id)
name_variants = [element[0] for element in webapi.get_person_names_from_id(person_id)]
mr_name = most_relevant_name(name_variants)
if mr_name:
search_param = mr_name.split(",")[0]
merge_data['search_param'] = search_param
merge_data['canonical_id'] = webapi.get_canonical_id_from_person_id(person_id)
return merge_data
@staticmethod
def _internal_ids_box(person_id, user_pid, ulevel):
'''
Processes and collects data for the internal ids box
@param person_id: person id of the current page's profile
@type person_id: int
@param user_pid: person id of the user
@type user_pid: int
@param ulevel: user's level
@type ulevel: string
@return: data required to build the internal ids box
@rtype: dict
'''
internal_ids_data = dict()
internal_ids_data['uid'], internal_ids_data['old_uids'] = webapi.get_internal_user_id_from_person_id(person_id)
internal_ids_data['person_id'] = person_id
internal_ids_data['user_pid'] = user_pid
internal_ids_data['ulevel'] = ulevel
return internal_ids_data
@staticmethod
def _external_ids_box(person_id, user_pid, ulevel):
'''
Processes and collects data for the external ids box
@param person_id: person id of the current page's profile
@type person_id: int
@param user_pid: person id of the user
@type user_pid: int
@param ulevel: user's level
@type ulevel: string
@return: data required to build the external ids box
@rtype: dict
'''
external_ids_data = dict()
external_ids_data['ext_ids'] = webapi.get_external_ids_from_person_id(person_id)
external_ids_data['person_id'] = person_id
external_ids_data['user_pid'] = user_pid
external_ids_data['ulevel'] = ulevel
return external_ids_data
@staticmethod
def _hepnames_box(person_id):
return webapi.get_hepnames(person_id)
def tickets_admin(self, req, form):
'''
Generate the admin page listing persons with open RT tickets
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
ln = argd['ln']
webapi.session_bareinit(req)
no_access = self._page_access_permission_wall(req, req_level='admin')
if no_access:
return no_access
session = get_session(req)
pinfo = session['personinfo']
cname = ''
is_owner = False
last_visited_pid = webapi.history_get_last_visited_pid(pinfo['visit_diary'])
if last_visited_pid is not None:
cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
is_owner = self._is_profile_owner(last_visited_pid)
menu = WebProfileMenu(str(cname), "open_tickets", ln, is_owner, self._is_admin(pinfo))
title = "Open RT tickets"
profile_page = WebProfilePage("help", title, no_cache=True)
profile_page.add_profile_menu(menu)
tickets = [[clean_string(webapi.get_most_frequent_name_from_pid(int(t[0]))),
webapi.get_person_redirect_link(t[0]), t[0], t[1]]
for t in webapi.get_persons_with_open_tickets_list()]
content = TEMPLATE.tmpl_tickets_admin(tickets)
content = TEMPLATE.tmpl_person_detail_layout(content)
body = profile_page.get_wrapped_body("generic", {'html': content})
return page(title=title,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def help(self, req, form):
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
ln = argd['ln']
_ = gettext_set_language(ln)
if not CFG_BIBAUTHORID_ENABLED:
return page_not_authorized(req, text=_("This page is not accessible directly."))
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
cname = ''
is_owner = False
last_visited_pid = webapi.history_get_last_visited_pid(pinfo['visit_diary'])
if last_visited_pid is not None:
cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
is_owner = self._is_profile_owner(last_visited_pid)
title = "Help Center"
profile_page = WebProfilePage("help", title, no_cache=True)
template_parameters = {'base_url': CFG_BASE_URL}
body = profile_page.get_wrapped_body("help", template_parameters)
return page(title=title,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def export(self, req, form):
'''
Generate JSONized export of Person data
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
argd = wash_urlargd(
form,
{'ln': (str, CFG_SITE_LANG),
'request': (str, None),
'userid': (str, None)})
if not CFG_JSON_AVAILABLE:
return "500_json_not_found__install_package"
# session = get_session(req)
request = None
userid = None
if "userid" in argd and argd['userid']:
userid = argd['userid']
else:
return "404_user_not_found"
if "request" in argd and argd['request']:
request = argd["request"]
# find user from ID
user_email = get_email_from_username(userid)
if user_email == userid:
return "404_user_not_found"
uid = get_uid_from_email(user_email)
uinfo = collect_user_info(uid)
# find person by uid
pid = webapi.get_pid_from_uid(uid)
# find papers by pid that are confirmed by a human.
papers = webapi.get_papers_by_person_id(pid, 2)
# filter by request param, e.g. arxiv
if not request:
return "404__no_filter_selected"
if request not in VALID_EXPORT_FILTERS:
return "500_filter_invalid"
if request == "arxiv":
query = "(recid:"
query += " OR recid:".join(papers)
query += ") AND 037:arxiv"
db_docs = perform_request_search(p=query, rg=0)
nickmail = ""
nickname = ""
db_arxiv_ids = []
try:
nickname = uinfo["nickname"]
except KeyError:
pass
if not nickname:
try:
nickmail = uinfo["email"]
except KeyError:
nickmail = user_email
nickname = nickmail
db_arxiv_ids = get_fieldvalues(db_docs, "037__a")
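# Build the assertion payload (nickname plus the claimed arXiv ids),
# sign it and attach the signature as the 'digest' field of the
# returned JSON.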
construct = {"nickname": nickname,
"claims": ";".join(db_arxiv_ids)}
jsondmp = json.dumps(construct)
signature = webapi.sign_assertion("arXiv", jsondmp)
construct["digest"] = signature
return json.dumps(construct)
index = __call__
class WebInterfaceBibAuthorIDManageProfilePages(WebInterfaceDirectory):
_exports = ['',
'import_orcid_pubs',
'push_orcid_pubs',
'connect_author_with_hepname',
'connect_author_with_hepname_ajax',
'suggest_orcid',
'suggest_orcid_ajax']
def _lookup(self, component, path):
'''
This handler parses dynamic URLs:
- /author/profile/1332 shows the page of author with id: 1332
- /author/profile/100:5522,1431 shows the page of the author
identified by the bibrefrec: '100:5522,1431'
'''
if not component in self._exports:
return WebInterfaceBibAuthorIDManageProfilePages(component), path
def _is_profile_owner(self, pid):
return self.person_id == int(pid)
def _is_admin(self, pinfo):
return pinfo['ulevel'] == 'admin'
def __init__(self, identifier=None):
'''
Constructor of the web interface.
@param identifier: identifier of an author. Can be one of:
- an author id: e.g. "14"
- a canonical id: e.g. "J.R.Ellis.1"
- a bibrefrec: e.g. "100:1442,155"
@type identifier: str
'''
self.person_id = -1 # -1 is not a valid author identifier
if identifier is None or not isinstance(identifier, str):
self.original_identifier = str()
return
else:
self.original_identifier = identifier
# check if it's a canonical id: e.g. "J.R.Ellis.1"
try:
pid = int(identifier)
except ValueError:
pid = int(webapi.get_person_id_from_canonical_id(identifier))
if pid >= 0:
self.person_id = pid
return
# check if it's an author id: e.g. "14"
try:
pid = int(identifier)
if webapi.author_has_papers(pid):
self.person_id = pid
return
except ValueError:
pass
# check if it's a bibrefrec: e.g. "100:1442,155"
if webapi.is_valid_bibref(identifier):
pid = int(webapi.get_person_id_from_paper(identifier))
if pid >= 0:
self.person_id = pid
return
def _get_orcid_token(self, session, pinfo):
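# Returns the OAuth2 access token kept in the session (presumably stored
# there by the ORCID OAuth flow), or None if it is missing or empty.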
if 'oauth2_access_token' not in session:
return None
token = session['oauth2_access_token']
if token != '':
return token
return None
def __call__(self, req, form):
'''
Generate SSO landing/author management page
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ulevel = pinfo['ulevel']
person_id = self.person_id
uid = getUid(req)
pinfo['claim_in_process'] = True
argd = wash_urlargd(form, {
'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0)})
debug = "verbose" in argd and argd["verbose"] > 0
ln = argd['ln']
_ = gettext_set_language(ln)
if not CFG_BIBAUTHORID_ENABLED or self.person_id is None:
return page_not_authorized(req, text=_("This page is not accessible directly."))
if person_id < 0:
return self._error_page(req, message=("Identifier %s is not a valid person identifier or does not exist anymore!" % self.original_identifier))
# log the visit
webapi.history_log_visit(req, 'manage_profile', pid=person_id)
# store the arxiv papers the user owns
if uid > 0 and not pinfo['arxiv_status']:
uinfo = collect_user_info(req)
arxiv_papers = list()
if 'external_arxivids' in uinfo and uinfo['external_arxivids']:
arxiv_papers = uinfo['external_arxivids'].split(';')
if arxiv_papers:
webapi.add_arxiv_papers_to_author(arxiv_papers, person_id)
pinfo['arxiv_status'] = True
params = WebInterfaceBibAuthorIDClaimPages.get_params_to_check_login_info(session)
login_info = webapi.get_login_info(uid, params)
# Create Wrapper Page Markup
cname = webapi.get_canonical_id_from_person_id(self.person_id)
long_name = webapi.get_longest_name_from_pid(self.person_id)
# TODO: Replace dash with —
page_title = "%s - %s" % (long_name, _('Manage Profile'))
menu = WebProfileMenu(
str(cname),
"manage_profile",
ln,
self._is_profile_owner(pinfo['pid']),
self._is_admin(pinfo))
profile_page = WebProfilePage("manage_profile", long_name, no_cache=True)
profile_page.add_profile_menu(menu)
profile_page.add_bootstrapped_data(json.dumps({
"backbone": """
(function(ticketbox) {
var app = ticketbox.app;
app.userops.set(%s);
app.bodyModel.set({userLevel: "%s"});
})(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel)
}))
if debug:
profile_page.add_debug_info(pinfo)
user_pid = webapi.get_user_pid(login_info['uid'])
person_data = webapi.get_person_info_by_pid(person_id)
arxiv_data = WebInterfaceBibAuthorIDClaimPages._arxiv_box(req, login_info, person_id, user_pid)
orcid_data = WebInterfaceBibAuthorIDClaimPages._orcid_box(arxiv_data['login'], person_id, user_pid, ulevel)
orcid_data['token'] = self._get_orcid_token(session, pinfo)
claim_paper_data = WebInterfaceBibAuthorIDClaimPages._claim_paper_box(person_id)
support_data = WebInterfaceBibAuthorIDClaimPages._support_box()
ids_box_html = None
if ulevel == 'admin':
ext_ids_data = WebInterfaceBibAuthorIDClaimPages._external_ids_box(person_id, user_pid, ulevel)
int_ids_data = WebInterfaceBibAuthorIDClaimPages._internal_ids_box(person_id, user_pid, ulevel)
ids_box_html = TEMPLATE.tmpl_ext_ids_box(
person_id,
int_ids_data,
ext_ids_data,
ln,
add_box=False,
loading=False)
autoclaim_data = WebInterfaceBibAuthorIDClaimPages._autoclaim_papers_box(
req, person_id, user_pid, login_info['logged_in_to_remote_systems'])
merge_data = WebInterfaceBibAuthorIDClaimPages._merge_box(person_id)
hepnames_data = WebInterfaceBibAuthorIDClaimPages._hepnames_box(person_id)
content = ''
# display status for any previously attempted merge
if pinfo['merge_info_message']:
teaser_key, message = pinfo['merge_info_message']
content += TEMPLATE.tmpl_merge_transaction_box(teaser_key, [message])
pinfo['merge_info_message'] = None
session.dirty = True
modal = ''
if 'orcid_info' in session:
orcid_info = session['orcid_info']['status']
else:
orcid_info = ''
if CFG_INSPIRE_SITE:
html_arxiv = TEMPLATE.tmpl_arxiv_box(arxiv_data, ln, add_box=False, loading=False)
html_orcid, modal = TEMPLATE.tmpl_orcid_box(orcid_data, ln, orcid_info, add_box=False, loading=False)
if hepnames_data is not None:
hepnames_data.update({
'cname': webapi.get_canonical_id_from_person_id(person_id),
'link_to_record': ulevel == "admin",
'hepnames_link': "%s/%s/" % (CFG_BASE_URL, "record"),
'new_record_link': 'https://labs.inspirehep.net/author/new',
'update_link': "http://labs.inspirehep.net/author/update?recid=",
'profile_link': "%s/%s" % (CFG_BASE_URL, "author/profile/")
})
html_hepnames = WebProfilePage.render_template('personal_details_box', hepnames_data)
else:
html_hepnames = "Loading.."
html_support = TEMPLATE.tmpl_support_box(support_data, ln, add_box=False, loading=False)
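# Autoclaim handling: either reuse the results of a previously performed
# autoclaim, or collect the papers known to the remote systems and add a
# claim operation for each of them to the autoclaim ticket.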
if autoclaim_data['hidden']:
autoclaim_successful_recs = None
autoclaim_unsuccessful_recs = None
else:
if not pinfo['orcid']['import_pubs'] and pinfo['autoclaim']['res'] is not None:
autoclaim_data = pinfo['autoclaim']['res']
autoclaim_successful_recs = autoclaim_data['successful_recids']
autoclaim_unsuccessful_recs = autoclaim_data['unsuccessful_recids']
else:
login_status = webapi.get_login_info(uid, params)
autoclaim_ticket = pinfo['autoclaim']['ticket']
external_pubs_association = pinfo['autoclaim']['external_pubs_association']
remote_systems = login_status['logged_in_to_remote_systems']
papers_to_autoclaim = set(webapi.get_papers_from_remote_systems(remote_systems,
params,
external_pubs_association))
for paper in papers_to_autoclaim:
operation_parts = {'pid': person_id,
"""
Student Views
"""
import datetime
import logging
import uuid
import time
import json
import warnings
from collections import defaultdict
from pytz import UTC
from ipware.ip import get_ip
from django.conf import settings
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import password_reset_confirm
from django.contrib import messages
from django.core.context_processors import csrf
from django.core import mail
from django.core.urlresolvers import reverse
from django.core.validators import validate_email, ValidationError
from django.db import IntegrityError, transaction
from django.http import (HttpResponse, HttpResponseBadRequest, HttpResponseForbidden,
HttpResponseServerError, Http404)
from django.shortcuts import redirect
from django.utils.translation import ungettext
from django_future.csrf import ensure_csrf_cookie
from django.utils.http import cookie_date, base36_to_int
from django.utils.translation import ugettext as _, get_language
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST, require_GET
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.template.response import TemplateResponse
from ratelimitbackend.exceptions import RateLimitException
from requests import HTTPError
from social.apps.django_app import utils as social_utils
from social.backends import oauth as social_oauth
from social.exceptions import AuthException, AuthAlreadyAssociated
from edxmako.shortcuts import render_to_response, render_to_string
from course_modes.models import CourseMode
from shoppingcart.api import order_history
from student.models import (
Registration, UserProfile, PendingNameChange,
PendingEmailChange, CourseEnrollment, unique_id_for_user,
CourseEnrollmentAllowed, UserStanding, LoginFailures,
create_comments_service_user, PasswordHistory, UserSignupSource,
DashboardConfiguration, LinkedInAddToProfileConfiguration)
from student.forms import AccountCreationForm, PasswordResetFormNoActive
from verify_student.models import SoftwareSecurePhotoVerification, MidcourseReverificationWindow
from certificates.models import CertificateStatuses, certificate_status_for_student
from dark_lang.models import DarkLangConfig
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import CourseLocator
from xmodule.modulestore import ModuleStoreEnum
from collections import namedtuple
from courseware.courses import get_courses, sort_by_announcement, sort_by_start_date # pylint: disable=import-error
from courseware.access import has_access
from django_comment_common.models import Role
from external_auth.models import ExternalAuthMap
import external_auth.views
from external_auth.login_and_register import (
login as external_auth_login,
register as external_auth_register
)
from bulk_email.models import Optout, CourseAuthorization
import shoppingcart
from lang_pref import LANGUAGE_KEY
import track.views
import dogstats_wrapper as dog_stats_api
from util.db import commit_on_success_with_read_committed
from util.json_request import JsonResponse
from util.bad_request_rate_limiter import BadRequestRateLimiter
from util.milestones_helpers import (
get_pre_requisite_courses_not_completed,
)
from microsite_configuration import microsite
from util.password_policy_validators import (
validate_password_length, validate_password_complexity,
validate_password_dictionary
)
import third_party_auth
from third_party_auth import pipeline, provider
from student.helpers import (
auth_pipeline_urls, set_logged_in_cookie,
check_verify_status_by_course
)
from xmodule.error_module import ErrorDescriptor
from shoppingcart.models import DonationConfiguration, CourseRegistrationCode
from embargo import api as embargo_api
import analytics
from eventtracking import tracker
# Note that this lives in LMS, so this dependency should be refactored.
from notification_prefs.views import enable_notifications
# Note that this lives in openedx, so this dependency should be refactored.
from openedx.core.djangoapps.user_api.preferences import api as preferences_api
log = logging.getLogger("edx.student")
AUDIT_LOG = logging.getLogger("audit")
ReverifyInfo = namedtuple('ReverifyInfo', 'course_id course_name course_number date status display') # pylint: disable=invalid-name
SETTING_CHANGE_INITIATED = 'edx.user.settings.change_initiated'
def csrf_token(context):
"""A csrf token that can be included in a form."""
token = context.get('csrf_token', '')
if token == 'NOTPROVIDED':
return ''
return (u'<div style="display:none"><input type="hidden"'
' name="csrfmiddlewaretoken" value="%s" /></div>' % (token))
# NOTE: This view is not linked to directly--it is called from
# branding/views.py:index(), which is cached for anonymous users.
# This means that it should always return the same thing for anon
# users. (in particular, no switching based on query params allowed)
def index(request, extra_context=None, user=AnonymousUser()):
"""
Render the edX main page.
extra_context is used to allow immediate display of certain modal windows, eg signup,
as used by external_auth.
"""
if extra_context is None:
extra_context = {}
# The course selection work is done in courseware.courses.
domain = settings.FEATURES.get('FORCE_UNIVERSITY_DOMAIN') # normally False
# do explicit check, because domain=None is valid
if domain is False:
domain = request.META.get('HTTP_HOST')
courses = get_courses(user, domain=domain)
if microsite.get_value("ENABLE_COURSE_SORTING_BY_START_DATE",
settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"]):
courses = sort_by_start_date(courses)
else:
courses = sort_by_announcement(courses)
context = {'courses': courses}
context.update(extra_context)
return render_to_response('index.html', context)
def process_survey_link(survey_link, user):
"""
If {UNIQUE_ID} appears in the link, replace it with a unique id for the user.
Currently, this is sha1(user.username). Otherwise, return survey_link.
"""
return survey_link.format(UNIQUE_ID=unique_id_for_user(user))
def cert_info(user, course, course_mode):
"""
Get the certificate info needed to render the dashboard section for the given
student and course. Returns a dictionary with keys:
'status': one of 'generating', 'ready', 'notpassing', 'processing', 'restricted'
'show_download_url': bool
'download_url': url, only present if show_download_url is True
'show_disabled_download_button': bool -- true if state is 'generating'
'show_survey_button': bool
'survey_url': url, only if show_survey_button is True
'grade': if status is not 'processing'
"""
if not course.may_certify():
return {}
return _cert_info(user, course, certificate_status_for_student(user, course.id), course_mode)
def reverification_info(course_enrollment_pairs, user, statuses):
"""
Returns reverification-related information for *all* of user's enrollments whose
reverification status is in status_list
Args:
course_enrollment_pairs (list): list of (course, enrollment) tuples
user (User): the user whose information we want
statuses (list): a list of reverification statuses we want information for
example: ["must_reverify", "denied"]
Returns:
dictionary of lists: dictionary with one key per status, e.g.
dict["must_reverify"] = []
dict["must_reverify"] = [some information]
"""
reverifications = defaultdict(list)
for (course, enrollment) in course_enrollment_pairs:
info = single_course_reverification_info(user, course, enrollment)
if info:
reverifications[info.status].append(info)
# Sort the data by the reverification_end_date
for status in statuses:
if reverifications[status]:
reverifications[status].sort(key=lambda x: x.date)
return reverifications
def single_course_reverification_info(user, course, enrollment): # pylint: disable=invalid-name
"""Returns midcourse reverification-related information for user with enrollment in course.
If a course has an open re-verification window, and that user has a verified enrollment in
the course, we return a tuple with relevant information. Returns None if there is no info..
Args:
user (User): the user we want to get information for
course (Course): the course in which the student is enrolled
enrollment (CourseEnrollment): the object representing the type of enrollment user has in course
Returns:
ReverifyInfo: (course_id, course_name, course_number, date, status)
OR, None: None if there is no re-verification info for this enrollment
"""
window = MidcourseReverificationWindow.get_window(course.id, datetime.datetime.now(UTC))
# If there's no window OR the user is not verified, we don't get reverification info
if (not window) or (enrollment.mode != "verified"):
return None
return ReverifyInfo(
course.id, course.display_name, course.number,
window.end_date.strftime('%B %d, %Y %X %p'),
SoftwareSecurePhotoVerification.user_status(user, window)[0],
SoftwareSecurePhotoVerification.display_status(user, window),
)
def get_course_enrollment_pairs(user, course_org_filter, org_filter_out_set):
"""
Get the relevant set of (Course, CourseEnrollment) pairs to be displayed on
a student's dashboard.
"""
for enrollment in CourseEnrollment.enrollments_for_user(user):
store = modulestore()
with store.bulk_operations(enrollment.course_id):
course = store.get_course(enrollment.course_id)
if course and not isinstance(course, ErrorDescriptor):
# if we are in a Microsite, then filter out anything that is not
# attributed (by ORG) to that Microsite
if course_org_filter and course_org_filter != course.location.org:
continue
# Conversely, if we are not in a Microsite, then let's filter out any enrollments
# with courses attributed (by ORG) to Microsites
elif course.location.org in org_filter_out_set:
continue
yield (course, enrollment)
else:
log.error(
u"User %s enrolled in %s course %s",
user.username,
"broken" if course else "non-existent",
enrollment.course_id
)
def _cert_info(user, course, cert_status, course_mode):
"""
Implements the logic for cert_info -- split out for testing.
"""
# simplify the status for the template using this lookup table
template_state = {
CertificateStatuses.generating: 'generating',
CertificateStatuses.regenerating: 'generating',
CertificateStatuses.downloadable: 'ready',
CertificateStatuses.notpassing: 'notpassing',
CertificateStatuses.restricted: 'restricted',
}
default_status = 'processing'
default_info = {'status': default_status,
'show_disabled_download_button': False,
'show_download_url': False,
'show_survey_button': False,
}
if cert_status is None:
return default_info
is_hidden_status = cert_status['status'] in ('unavailable', 'processing', 'generating', 'notpassing')
if course.certificates_display_behavior == 'early_no_info' and is_hidden_status:
return None
status = template_state.get(cert_status['status'], default_status)
status_dict = {
'status': status,
'show_download_url': status == 'ready',
'show_disabled_download_button': status == 'generating',
'mode': cert_status.get('mode', None),
'linked_in_url': None
}
if (status in ('generating', 'ready', 'notpassing', 'restricted') and
course.end_of_course_survey_url is not None):
status_dict.update({
'show_survey_button': True,
'survey_url': process_survey_link(course.end_of_course_survey_url, user)})
else:
status_dict['show_survey_button'] = False
if status == 'ready':
if 'download_url' not in cert_status:
log.warning(
u"User %s has a downloadable cert for %s, but no download url",
user.username,
course.id
)
return default_info
else:
status_dict['download_url'] = cert_status['download_url']
# If enabled, show the LinkedIn "add to profile" button
# Clicking this button sends the user to LinkedIn where they
# can add the certificate information to their profile.
linkedin_config = LinkedInAddToProfileConfiguration.current()
if linkedin_config.enabled:
status_dict['linked_in_url'] = linkedin_config.add_to_profile_url(
course.id,
course.display_name,
cert_status.get('mode'),
cert_status['download_url']
)
if status in ('generating', 'ready', 'notpassing', 'restricted'):
if 'grade' not in cert_status:
# Note: as of 11/20/2012, we know there are students in this state-- cs169.1x,
# who need to be regraded (we weren't tracking 'notpassing' at first).
# We can add a log.warning here once we think it shouldn't happen.
return default_info
else:
status_dict['grade'] = cert_status['grade']
return status_dict
@ensure_csrf_cookie
def signin_user(request):
"""Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`."""
external_auth_response = external_auth_login(request)
if external_auth_response is not None:
return external_auth_response
if request.user.is_authenticated():
return redirect(reverse('dashboard'))
course_id = request.GET.get('course_id')
email_opt_in = request.GET.get('email_opt_in')
context = {
'course_id': course_id,
'email_opt_in': email_opt_in,
'enrollment_action': request.GET.get('enrollment_action'),
# Bool injected into JS to submit form if we're inside a running third-
# party auth pipeline; distinct from the actual instance of the running
# pipeline, if any.
'pipeline_running': 'true' if pipeline.running(request) else 'false',
'pipeline_url': auth_pipeline_urls(pipeline.AUTH_ENTRY_LOGIN, course_id=course_id, email_opt_in=email_opt_in),
'platform_name': microsite.get_value(
'platform_name',
settings.PLATFORM_NAME
),
}
return render_to_response('login.html', context)
@ensure_csrf_cookie
def register_user(request, extra_context=None):
"""Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`."""
if request.user.is_authenticated():
return redirect(reverse('dashboard'))
external_auth_response = external_auth_register(request)
if external_auth_response is not None:
return external_auth_response
course_id = request.GET.get('course_id')
email_opt_in = request.GET.get('email_opt_in')
context = {
'course_id': course_id,
'email_opt_in': email_opt_in,
'email': '',
'enrollment_action': request.GET.get('enrollment_action'),
'name': '',
'running_pipeline': None,
'pipeline_urls': auth_pipeline_urls(pipeline.AUTH_ENTRY_REGISTER, course_id=course_id, email_opt_in=email_opt_in),
'platform_name': microsite.get_value(
'platform_name',
settings.PLATFORM_NAME
),
'selected_provider': '',
'username': '',
}
if extra_context is not None:
context.update(extra_context)
if context.get("extauth_domain", '').startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
return render_to_response('register-shib.html', context)
# If third-party auth is enabled, prepopulate the form with data from the
# selected provider.
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
current_provider = provider.Registry.get_by_backend_name(running_pipeline.get('backend'))
overrides = current_provider.get_register_form_data(running_pipeline.get('kwargs'))
overrides['running_pipeline'] = running_pipeline
overrides['selected_provider'] = current_provider.NAME
context.update(overrides)
return render_to_response('register.html', context)
def complete_course_mode_info(course_id, enrollment, modes=None):
"""
Compute some extra information from the given course modes
and the user's current enrollment.
Returns the following information:
- whether to show the course upsell information
- the number of days until the user can no longer upsell
"""
if modes is None:
modes = CourseMode.modes_for_course_dict(course_id)
mode_info = {'show_upsell': False, 'days_for_upsell': None}
# we want to know if the user is already verified and if verified is an
# option
if 'verified' in modes and enrollment.mode != 'verified':
mode_info['show_upsell'] = True
# if there is an expiration date, find out how long from now it is
if modes['verified'].expiration_datetime:
today = datetime.datetime.now(UTC).date()
mode_info['days_for_upsell'] = (modes['verified'].expiration_datetime.date() - today).days
return mode_info
def is_course_blocked(request, redeemed_registration_codes, course_key):
"""Check whether the registration is blocked or not."""
blocked = False
for redeemed_registration in redeemed_registration_codes:
# registration codes may be generated via Bulk Purchase Scenario
# we have to check only for the invoice generated registration codes
# that their invoice is valid or not
if redeemed_registration.invoice_item:
if not getattr(redeemed_registration.invoice_item.invoice, 'is_valid'):
blocked = True
# disabling email notifications for unpaid registration courses
Optout.objects.get_or_create(user=request.user, course_id=course_key)
log.info(
u"User %s (%s) opted out of receiving emails from course %s",
request.user.username,
request.user.email,
course_key
)
track.views.server_track(request, "change-email1-settings", {"receive_emails": "no", "course": course_key.to_deprecated_string()}, page='dashboard')
break
return blocked
@login_required
@ensure_csrf_cookie
def dashboard(request):
user = request.user
platform_name = microsite.get_value("platform_name", settings.PLATFORM_NAME)
# for microsites, we want to filter and only show enrollments for courses within
# the microsites 'ORG'
course_org_filter = microsite.get_value('course_org_filter')
# Let's filter out any courses in an "org" that has been declared to be
# in a Microsite
org_filter_out_set = microsite.get_all_orgs()
# remove our current Microsite from the "filter out" list, if applicable
if course_org_filter:
org_filter_out_set.remove(course_org_filter)
# Build our (course, enrollment) list for the user, but ignore any courses that no
# longer exist (because the course IDs have changed). Still, we don't delete those
# enrollments, because it could have been a data push snafu.
course_enrollment_pairs = list(get_course_enrollment_pairs(user, course_org_filter, org_filter_out_set))
# sort the enrollment pairs by the enrollment date
course_enrollment_pairs.sort(key=lambda x: x[1].created, reverse=True)
# Retrieve the course modes for each course
enrolled_course_ids = [course.id for course, __ in course_enrollment_pairs]
all_course_modes, unexpired_course_modes = CourseMode.all_and_unexpired_modes_for_courses(enrolled_course_ids)
course_modes_by_course = {
course_id: {
mode.slug: mode
for mode in modes
}
for course_id, modes in unexpired_course_modes.iteritems()
}
# Check to see if the student has recently enrolled in a course.
# If so, display a notification message confirming the enrollment.
enrollment_message = _create_recent_enrollment_message(
course_enrollment_pairs, course_modes_by_course
)
course_optouts = Optout.objects.filter(user=user).values_list('course_id', flat=True)
message = ""
if not user.is_active:
message = render_to_string(
'registration/activate_account_notice.html',
{'email': user.email, 'platform_name': platform_name}
)
# Global staff can see what courses errored on their dashboard
staff_access = False
errored_courses = {}
if has_access(user, 'staff', 'global'):
# Show any courses that errored on load
staff_access = True
errored_courses = modulestore().get_errored_courses()
show_courseware_links_for = frozenset(
course.id for course, _enrollment in course_enrollment_pairs
if has_access(request.user, 'load', course)
and has_access(request.user, 'view_courseware_with_prerequisites', course)
)
# Construct a dictionary of course mode information
# used to render the course list. We re-use the course modes dict
# we loaded earlier to avoid hitting the database.
course_mode_info = {
course.id: complete_course_mode_info(
course.id, enrollment,
modes=course_modes_by_course[course.id]
)
for course, enrollment in course_enrollment_pairs
}
# Determine the per-course verification status
# This is a dictionary in which the keys are course locators
# and the values are one of:
#
# VERIFY_STATUS_NEED_TO_VERIFY
# VERIFY_STATUS_SUBMITTED
# VERIFY_STATUS_APPROVED
# VERIFY_STATUS_MISSED_DEADLINE
#
# Each of which correspond to a particular message to display
# next to the course on the dashboard.
#
# If a course is not included in this dictionary,
# there is no verification messaging to display.
verify_status_by_course = check_verify_status_by_course(
user,
course_enrollment_pairs,
all_course_modes
)
cert_statuses = {
course.id: cert_info(request.user, course, _enrollment.mode)
for course, _enrollment in course_enrollment_pairs
}
# only show email settings for Mongo courses and when bulk email is turned on
show_email_settings_for = frozenset(
course.id for course, _enrollment in course_enrollment_pairs if (
settings.FEATURES['ENABLE_INSTRUCTOR_EMAIL'] and
modulestore().get_modulestore_type(course.id) != ModuleStoreEnum.Type.xml and
CourseAuthorization.instructor_email_enabled(course.id)
)
)
# Verification Attempts
# Used to generate the "you must reverify for course x" banner
verification_status, verification_msg = SoftwareSecurePhotoVerification.user_status(user)
# Gets data for midcourse reverifications, if any are necessary or have failed
statuses = ["approved", "denied", "pending", "must_reverify"]
reverifications = reverification_info(course_enrollment_pairs, user, statuses)
show_refund_option_for = frozenset(course.id for course, _enrollment in course_enrollment_pairs
if _enrollment.refundable())
block_courses = frozenset(course.id for course, enrollment in course_enrollment_pairs
if is_course_blocked(request, CourseRegistrationCode.objects.filter(course_id=course.id, registrationcoderedemption__redeemed_by=request.user), course.id))
enrolled_courses_either_paid = frozenset(course.id for course, _enrollment in course_enrollment_pairs
if _enrollment.is_paid_course())
# If there are *any* denied reverifications that have not been toggled off,
# we'll display the banner
denied_banner = any(item.display for item in reverifications["denied"])
# Populate the Order History for the side-bar.
order_history_list = order_history(user, course_org_filter=course_org_filter, org_filter_out_set=org_filter_out_set)
# get list of courses having pre-requisites yet to be completed
courses_having_prerequisites = frozenset(course.id for course, _enrollment in course_enrollment_pairs
if course.pre_requisite_courses)
courses_requirements_not_met = get_pre_requisite_courses_not_completed(user, courses_having_prerequisites)
ccx_membership_triplets = []
if settings.FEATURES.get('CUSTOM_COURSES_EDX', False):
from ccx import ACTIVE_CCX_KEY
from ccx.utils import get_ccx_membership_triplets
ccx_membership_triplets = get_ccx_membership_triplets(
user, course_org_filter, org_filter_out_set
)
# should we deselect any active CCX at this time so that we don't have
# to change the URL for viewing a course? I think so.
request.session[ACTIVE_CCX_KEY] = None
context = {
'enrollment_message': enrollment_message,
'course_enrollment_pairs': course_enrollment_pairs,
'course_optouts': course_optouts,
'message': message,
'staff_access': staff_access,
'errored_courses': errored_courses,
'show_courseware_links_for': show_courseware_links_for,
'all_course_modes': course_mode_info,
'cert_statuses': cert_statuses,
'show_email_settings_for': show_email_settings_for,
'reverifications': reverifications,
'verification_status': verification_status,
'verification_status_by_course': verify_status_by_course,
'verification_msg': verification_msg,
'show_refund_option_for': show_refund_option_for,
'block_courses': block_courses,
'denied_banner': denied_banner,
'billing_email': settings.PAYMENT_SUPPORT_EMAIL,
'user': user,
'logout_url': reverse(logout_user),
'platform_name': platform_name,
'enrolled_courses_either_paid': enrolled_courses_either_paid,
'provider_states': [],
'order_history_list': order_history_list,
'courses_requirements_not_met': courses_requirements_not_met,
'ccx_membership_triplets': ccx_membership_triplets,
}
return render_to_response('dashboard.html', context)
def _create_recent_enrollment_message(course_enrollment_pairs, course_modes):
"""Builds a recent course enrollment message
Constructs a new message template based on any recent course enrollments for the student.
Args:
course_enrollment_pairs (list): A list of tuples containing courses, and the associated enrollment information.
course_modes (dict): Mapping of course IDs to course mode dictionaries.
Returns:
A string representing the HTML message output from the message template.
None if there are no recently enrolled courses.
"""
recently_enrolled_courses = _get_recently_enrolled_courses(course_enrollment_pairs)
if recently_enrolled_courses:
messages = [
{
"course_id": course.id,
"course_name": course.display_name,
"allow_donation": _allow_donation(course_modes, course.id, enrollment)
}
for course, enrollment in recently_enrolled_courses
]
platform_name = microsite.get_value('platform_name', settings.PLATFORM_NAME)
return render_to_string(
'enrollment/course_enrollment_message.html',
{'course_enrollment_messages': messages, 'platform_name': platform_name}
)
def _get_recently_enrolled_courses(course_enrollment_pairs):
"""Checks to see if the student has recently enrolled in courses.
Checks to see if any of the enrollments in the course_enrollment_pairs have been recently created and activated.
Args:
course_enrollment_pairs (list): A list of tuples containing courses, and the associated enrollment information.
Returns:
A list of courses
"""
seconds = DashboardConfiguration.current().recent_enrollment_time_delta
time_delta = (datetime.datetime.now(UTC) - datetime.timedelta(seconds=seconds))
return [
(course, enrollment) for course, enrollment in course_enrollment_pairs
# If the enrollment has no created date, we are explicitly excluding the course
# from the list of recent enrollments.
if enrollment.is_active and enrollment.created > time_delta
]
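# Illustrative example (assuming DashboardConfiguration.recent_enrollment_time_delta is
# 600 seconds): only active enrollments created within the last 10 minutes are returned.
#
#   recent = _get_recently_enrolled_courses(course_enrollment_pairs)
#   # [(course, enrollment), ...] where enrollment.created > now - 600 seconds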
def _allow_donation(course_modes, course_id, enrollment):
"""Determines if the dashboard will request donations for the given course.
Check if donations are configured for the platform, and if the current course is accepting donations.
Args:
course_modes (dict): Mapping of course IDs to course mode dictionaries.
course_id (str): The unique identifier for the course.
enrollment (CourseEnrollment): The user's enrollment in the given course
Returns:
True if the course is allowing donations.
"""
donations_enabled = DonationConfiguration.current().enabled
return donations_enabled and enrollment.mode in course_modes[course_id] and course_modes[course_id][enrollment.mode].min_price == 0
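# Illustrative example (hypothetical mode objects): donations are requested only when the
# platform-wide DonationConfiguration is enabled, the user's enrollment mode exists for
# the course, and that mode is free (min_price == 0):
#
#   course_modes = {course_id: {'honor': honor_mode}}  # honor_mode.min_price == 0
#   _allow_donation(course_modes, course_id, enrollment)  # True when enrollment.mode == 'honor'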
def try_change_enrollment(request):
"""
This method calls change_enrollment if the necessary POST
parameters are present, but does not return anything in most cases. It
simply logs the result or exception. This is usually
called after a registration or login, as a secondary action.
It should not interrupt a successful registration or login.
"""
if 'enrollment_action' in request.POST:
try:
enrollment_response = change_enrollment(request)
# There isn't really a way to display the results to the user, so we just log it
# We expect the enrollment to be a success, and will show up on the dashboard anyway
log.info(
u"Attempted to automatically enroll after login. Response code: %s; response body: %s",
enrollment_response.status_code,
enrollment_response.content
)
# Hack: since change_enrollment delivers its redirect_url in the content
# of its response, we check here that only the 200 codes with content
# will return redirect_urls.
if enrollment_response.status_code == 200 and enrollment_response.content != '':
return enrollment_response.content
except Exception as exc: # pylint: disable=broad-except
log.exception(u"Exception automatically enrolling after login: %s", exc)
def _update_email_opt_in(request, org):
"""Helper function used to hit the profile API if email opt-in is enabled."""
email_opt_in = request.POST.get('email_opt_in')
if email_opt_in is not None:
email_opt_in_boolean = email_opt_in == 'true'
preferences_api.update_email_opt_in(request.user, org, email_opt_in_boolean)
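# Illustrative example: a POST containing email_opt_in='true' is converted to a boolean
# and recorded for the course's org through the preferences API.
#
#   # request.POST = {'email_opt_in': 'true', ...}
#   _update_email_opt_in(request, course_id.org)  # stores an opt-in of True for that org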
@require_POST
@commit_on_success_with_read_committed
def change_enrollment(request, check_access=True):
"""
Modify the enrollment status for the logged-in user.
The request parameter must be a POST request (other methods return 405)
that specifies course_id and enrollment_action parameters. If course_id or
enrollment_action is not specified, if course_id is not valid, if
enrollment_action is something other than "enroll" or "unenroll", if
enrollment_action is "enroll" and enrollment is closed for the course, or
if enrollment_action is "unenroll" and the user is not enrolled in the
course, a 400 error will be returned. If the user is not logged in, 403
will be returned; it is important that only this case return 403 so the
front end can redirect the user to a registration or login page when this
happens. This function should only be called from an AJAX request or
as a post-login/registration helper, so the error messages in the responses
should never actually be user-visible.
Args:
request (`Request`): The Django request object
Keyword Args:
check_access (boolean): If True, we check that an accessible course actually
exists for the given course_key before we enroll the student.
The default is True. Passing check_access=False is only appropriate for
legacy code or code with non-standard flows (ex. beta tester invitations);
for any standard enrollment flow you want this to be True.
Returns:
Response
"""
# Get the user
user = request.user
# Ensure the user is authenticated
if not user.is_authenticated():
return HttpResponseForbidden()
# Ensure we received a course_id
action = request.POST.get("enrollment_action")
if 'course_id' not in request.POST:
return HttpResponseBadRequest(_("Course id not specified"))
try:
course_id = SlashSeparatedCourseKey.from_deprecated_string(request.POST.get("course_id"))
except InvalidKeyError:
log.warning(
u"User %s tried to %s with invalid course id: %s",
user.username,
action,
request.POST.get("course_id"),
)
return HttpResponseBadRequest(_("Invalid course id"))
if action == "enroll":
# Make sure the course exists
# We don't do this check on unenroll, or a bad course id can't be unenrolled from
if not modulestore().has_course(course_id):
log.warning(
u"User %s tried to enroll in non-existent course %s",
user.username,
course_id
)
return HttpResponseBadRequest(_("Course id is invalid"))
# Record the user's email opt-in preference
if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'):
_update_email_opt_in(request, course_id.org)
available_modes = CourseMode.modes_for_course_dict(course_id)
# Check whether the user is blocked from enrolling in this course
# This can occur if the user's IP is on a global blacklist
# or if the user is enrolling in a country in which the course
# is not available.
redirect_url = embargo_api.redirect_if_blocked(
course_id, user=user, ip_address=get_ip(request),
url=request.path
)
if redirect_url:
return HttpResponse(redirect_url)
# Check that auto enrollment is allowed for this course
# (= the course is NOT behind a paywall)
if CourseMode.can_auto_enroll(course_id):
# Enroll the user using the default mode (honor)
# We're assuming that users of the course enrollment table
# will NOT try to look up the course enrollment model
# by its slug. If they do, it's possible (based on the state of the database)
# for no such model to exist, even though we've set the enrollment type
# to "honor".
try:
CourseEnrollment.enroll(user, course_id, check_access=check_access)
except Exception:
return HttpResponseBadRequest(_("Could not enroll"))
# If we have more than one course mode or professional ed is enabled,
# then send the user to the choose your track page.
# (In the case of no-id-professional/professional ed, this will redirect to a page that
# funnels users directly into the verification / payment flow)
if CourseMode.has_verified_mode(available_modes) or CourseMode.has_professional_mode(available_modes):
return HttpResponse(
reverse("course_modes_choose", kwargs={'course_id': unicode(course_id)})
)
# Otherwise, there is only one mode available (the default)
return HttpResponse()
elif action == "add_to_cart":
# Pass the request handling to shoppingcart.views
# The view in shoppingcart.views performs error handling and logs different errors. But this elif clause
# is only used in the "auto-add after user reg/login" case, i.e. it's always wrapped in try_change_enrollment.
# This means there's no good way to display error messages to the user. So we log the errors and send
# the user to the shopping cart page always, where they can reasonably discern the status of their cart,
# whether things got added, etc
shoppingcart.views.add_course_to_cart(request, course_id.to_deprecated_string())
return HttpResponse(
reverse("shoppingcart.views.show_cart")
)
elif action == "unenroll":
if not CourseEnrollment.is_enrolled(user, course_id):
return HttpResponseBadRequest(_("You are not enrolled in this course"))
CourseEnrollment.unenroll(user, course_id)
return HttpResponse()
else:
return HttpResponseBadRequest(_("Enrollment action is invalid"))
@never_cache
@ensure_csrf_cookie
def accounts_login(request):
"""Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`."""
external_auth_response = external_auth_login(request)
if external_auth_response is not None:
return external_auth_response
redirect_to = request.GET.get('next')
context = {
'pipeline_running': 'false',
'pipeline_url': auth_pipeline_urls(pipeline.AUTH_ENTRY_LOGIN, redirect_url=redirect_to),
'platform_name': settings.PLATFORM_NAME,
}
return render_to_response('login.html', context)
# Need different levels of logging
@ensure_csrf_cookie
def login_user(request, error=""): # pylint: disable-msg=too-many-statements,unused-argument
"""AJAX request to log in the user."""
backend_name = None
email = None
password = None
redirect_url = None
response = None
running_pipeline = None
third_party_auth_requested = third_party_auth.is_enabled() and pipeline.running(request)
third_party_auth_successful = False
trumped_by_first_party_auth = bool(request.POST.get('email')) or bool(request.POST.get('password'))
user = None
if third_party_auth_requested and not trumped_by_first_party_auth:
# The user has already authenticated via third-party auth and has not
# asked to do first party auth by supplying a username or password. We
# now want to put them through the same logging and cookie calculation
# logic as with first-party auth.
running_pipeline = pipeline.get(request)
# reidransom: For some reason random characters get appended to
# `running_pipeline['kwargs']['username']`.
#username = running_pipeline['kwargs'].get('username')
username = running_pipeline['kwargs']['details'].get('username')
backend_name = running_pipeline['backend']
requested_provider = provider.Registry.get_by_backend_name(backend_name)
try:
user = pipeline.get_authenticated_user(username, backend_name)
third_party_auth_successful = True
except User.DoesNotExist:
AUDIT_LOG.warning(
u'Login failed - user with username {username} has no social auth with backend_name {backend_name}'.format(
username=username, backend_name=backend_name))
return HttpResponse(
_("You've successfully logged into your {provider_name} account, but this account isn't linked with an {platform_name} account yet.").format(
platform_name=settings.PLATFORM_NAME, provider_name=requested_provider.NAME
)
+ "<br/><br/>" +
_("Use your {platform_name} username and password to log into {platform_name} below, "
"and then link your {platform_name} account with {provider_name} from your dashboard.").format(
platform_name=settings.PLATFORM_NAME, provider_name=requested_provider.NAME
)
+ "<br/><br/>" +
_("If you don't have an {platform_name} account yet, click <strong>Register Now</strong> at the top of the page.").format(
platform_name=settings.PLATFORM_NAME
),
content_type="text/plain",
status=403
)
else:
if 'email' not in request.POST or 'password' not in request.POST:
return JsonResponse({
"success": False,
"value": _('There was an error receiving your login information. Please email us.'), # TODO: User error message
}) # TODO: this should be status code 400 # pylint: disable=fixme
email = request.POST['email']
password = request.POST['password']
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.warning(u"Login failed - Unknown user email")
else:
AUDIT_LOG.warning(u"Login failed - Unknown user email: {0}".format(email))
# check if the user has a linked shibboleth account, if so, redirect the user to shib-login
# This behavior is pretty much like what gmail does for shibboleth. Try entering some @stanford.edu
# address into the Gmail login.
if settings.FEATURES.get('AUTH_USE_SHIB') and user:
try:
eamap = ExternalAuthMap.objects.get(user=user)
if eamap.external_domain.startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
return JsonResponse({
"success": False,
"redirect": reverse('shib-login'),
}) # TODO: this should be status code 301 # pylint: disable=fixme
except ExternalAuthMap.DoesNotExist:
# This is actually the common case: logging in a user without an external linked login
AUDIT_LOG.info(u"User %s w/o external auth attempting login", user)
# see if account has been locked out due to excessive login failures
user_found_by_email_lookup = user
if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
if LoginFailures.is_user_locked_out(user_found_by_email_lookup):
return JsonResponse({
"success": False,
"value": _('This account has been temporarily locked due to excessive login failures. Try again later.'),
}) # TODO: this should be status code 429 # pylint: disable=fixme
# see if the user must reset his/her password due to any policy settings
if user_found_by_email_lookup and PasswordHistory.should_user_reset_password_now(user_found_by_email_lookup):
return JsonResponse({
"success": False,
"value": _('Your password has expired due to password policy on this account. You must '
'reset your password before you can log in again. Please click the '
'"Forgot Password" link on this page to reset your password before logging in again.'),
}) # TODO: this should be status code 403 # pylint: disable=fixme
# if the user doesn't exist, we want to set the username to an invalid
# username so that authentication is guaranteed to fail and we can take
# advantage of the ratelimited backend
username = user.username if user else ""
if not third_party_auth_successful:
try:
user = authenticate(username=username, password=password, request=request)
# this occurs when there are too many attempts from the same IP address
except RateLimitException:
return JsonResponse({
"success": False,
"value": _('Too many failed login attempts. Try again later.'),
}) # TODO: this should be status code 429 # pylint: disable=fixme
if user is None:
# tick the failed login counters if the user exists in the database
if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
LoginFailures.increment_lockout_counter(user_found_by_email_lookup)
# if we didn't find this username earlier, the account for this email
# doesn't exist, and doesn't have a corresponding password
if username != "":
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
loggable_id = user_found_by_email_lookup.id if user_found_by_email_lookup else "<unknown>"
AUDIT_LOG.warning(u"Login failed - password for user.id: {0} is invalid".format(loggable_id))
else:
AUDIT_LOG.warning(u"Login failed - password for {0} is invalid".format(email))
return JsonResponse({
"success": False,
"value": _('Email or password is incorrect.'),
}) # TODO: this should be status code 400 # pylint: disable=fixme
# successful login, clear failed login attempts counters, if applicable
if LoginFailures.is_feature_enabled():
LoginFailures.clear_lockout_counter(user)
# Track the user's sign in
if settings.FEATURES.get('SEGMENT_IO_LMS') and hasattr(settings, 'SEGMENT_IO_LMS_KEY'):
tracking_context = tracker.get_tracker().resolve_context()
analytics.identify(user.id, {
'email': email,
'username': username,
})
analytics.track(
user.id,
"edx.bi.user.account.authenticated",
{
'category': "conversion",
'label': request.POST.get('course_id'),
'provider': None
},
context={
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
if user is not None and user.is_active:
try:
# We do not log here, because we have a handler registered
# to perform logging on successful logins.
login(request, user)
if request.POST.get('remember') == 'true':
request.session.set_expiry(604800)
log.debug("Setting user session to never expire")
else:
request.session.set_expiry(0)
except Exception as exc: # pylint: disable=broad-except
AUDIT_LOG.critical("Login failed - Could not create session. Is memcached running?")
log.critical("Login failed - Could not create session. Is memcached running?")
log.exception(exc)
raise
redirect_url = try_change_enrollment(request)
if third_party_auth_successful:
redirect_url = pipeline.get_complete_url(backend_name)
response = JsonResponse({
"success": True,
"redirect_url": redirect_url,
})
# Ensure that the external marketing site can
# detect that the user is logged in.
return set_logged_in_cookie(request, response)
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.warning(u"Login failed - Account not active for user.id: {0}, resending activation".format(user.id))
else:
AUDIT_LOG.warning(u"Login failed - Account not active for user {0}, resending activation".format(username))
reactivation_email_for_user(user)
not_activated_msg = _("This account has not been activated. We have sent another activation message. Please check your email for the activation instructions.")
return JsonResponse({
"success": False,
"value": not_activated_msg,
}) # TODO: this should be status code 400 # pylint: disable=fixme
@csrf_exempt
@require_POST
@social_utils.strategy("social:complete")
def login_oauth_token(request, backend):
"""
Authenticate the client using an OAuth access token by using the token to
retrieve information from a third party and matching that information to an
existing user.
"""
warnings.warn("Please use AccessTokenExchangeView instead.", DeprecationWarning)
backend = request.social_strategy.backend
if isinstance(backend, social_oauth.BaseOAuth1) or isinstance(backend, social_oauth.BaseOAuth2):
if "access_token" in request.POST:
# Tell third party auth pipeline that this is an API call
request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_LOGIN_API
user = None
try:
user = backend.do_auth(request.POST["access_token"])
except HTTPError:
pass
# do_auth can return a non-User object if it fails
if user and isinstance(user, User):
login(request, user)
return JsonResponse(status=204)
else:
# Ensure user does not re-enter the pipeline
request.social_strategy.clean_partial_pipeline()
return JsonResponse({"error": "invalid_token"}, status=401)
else:
return JsonResponse({"error": "invalid_request"}, status=400)
raise Http404
@ensure_csrf_cookie
def logout_user(request):
"""
HTTP request to log out the user. Redirects to marketing page.
Deletes both the CSRF and sessionid cookies so the marketing
site can determine the logged-in state of the user.
"""
# We do not log here, because we have a handler registered
# to perform logging on successful logouts.
logout(request)
if settings.FEATURES.get('AUTH_USE_CAS'):
target = reverse('cas-logout')
else:
target = '/'
response = redirect(target)
response.delete_cookie(
settings.EDXMKTG_COOKIE_NAME,
path='/', domain=settings.SESSION_COOKIE_DOMAIN,
)
return response
@require_GET
@login_required
@ensure_csrf_cookie
def manage_user_standing(request):
"""
Renders the view used to manage user standing. Also displays a table
of user accounts that have been disabled and who disabled them.
"""
if not request.user.is_staff:
raise Http404
all_disabled_accounts = UserStanding.objects.filter(
account_status=UserStanding.ACCOUNT_DISABLED
)
all_disabled_users = [standing.user for standing in all_disabled_accounts]
headers = ['username', 'account_changed_by']
rows = []
for user in all_disabled_users:
row = [user.username, user.standing.all()[0].changed_by]
rows.append(row)
context = {'headers': headers, 'rows': rows}
return render_to_response("manage_user_standing.html", context)
@require_POST
@login_required
@ensure_csrf_cookie
def disable_account_ajax(request):
"""
Ajax call to change user standing. Endpoint of the form
in manage_user_standing.html
"""
if not request.user.is_staff:
raise Http404
username = request.POST.get('username')
context = {}
if username is None or username.strip() == '':
context['message'] = _('Please enter a username')
return JsonResponse(context, status=400)
account_action = request.POST.get('account_action')
if account_action is None:
context['message'] = _('Please choose an option')
return JsonResponse(context, status=400)
username = username.strip()
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
context['message'] = _("User with username {} does not exist").format(username)
return JsonResponse(context, status=400)
else:
user_account, _success = UserStanding.objects.get_or_create(
user=user, defaults={'changed_by': request.user},
)
if account_action == 'disable':
user_account.account_status = UserStanding.ACCOUNT_DISABLED
context['message'] = _("Successfully disabled {}'s account").format(username)
log.info(u"%s disabled %s's account", request.user, username)
elif account_action == 'reenable':
user_account.account_status = UserStanding.ACCOUNT_ENABLED
context['message'] = _("Successfully reenabled {}'s account").format(username)
log.info(u"%s reenabled %s's account", request.user, username)
else:
context['message'] = _("Unexpected account status")
return JsonResponse(context, status=400)
user_account.changed_by = request.user
user_account.standing_last_changed_at = datetime.datetime.now(UTC)
user_account.save()
return JsonResponse(context)
@login_required
@ensure_csrf_cookie
def change_setting(request):
"""JSON call to change a profile setting: Right now, location"""
# TODO (vshnayder): location is no longer used
u_prof = UserProfile.objects.get(user=request.user) # request.user.profile_cache
if 'location' in request.POST:
u_prof.location = request.POST['location']
u_prof.save()
return JsonResponse({
"success": True,
"location": u_prof.location,
})
class AccountValidationError(Exception):
def __init__(self, message, field):
super(AccountValidationError, self).__init__(message)
self.field = field
@receiver(post_save, sender=User)
def user_signup_handler(sender, **kwargs): # pylint: disable=unused-argument
"""
Handler that saves the user signup source
when the user is created.
"""
if 'created' in kwargs and kwargs['created']:
site = microsite.get_value('SITE_NAME')
if site:
user_signup_source = UserSignupSource(user=kwargs['instance'], site=site)
user_signup_source.save()
log.info(u'user {} originated from a white labeled "Microsite"'.format(kwargs['instance'].id))
def _do_create_account(form):
"""
Given cleaned post variables, create the User and UserProfile objects, as well as the
registration for this user.
Returns a tuple (User, UserProfile, Registration).
Note: this function is also used for creating test users.
"""
if not form.is_valid():
raise ValidationError(form.errors)
user = User(
username=form.cleaned_data["username"],
email=form.cleaned_data["email"],
is_active=False
)
user.set_password(form.cleaned_data["password"])
registration = Registration()
# TODO: Rearrange so that if part of the process fails, the whole process fails.
# Right now, we can have e.g. no registration e-mail sent out and a zombie account
try:
user.save()
except IntegrityError:
# Figure out the cause of the integrity error
if len(User.objects.filter(username=user.username)) > 0:
raise AccountValidationError(
_("An account with the Public Username '{username}' already exists.").format(username=user.username),
field="username"
)
elif len(User.objects.filter(email=user.email)) > 0:
raise AccountValidationError(
_("An account with the Email '{email}' already exists.").format(email=user.email),
field="email"
)
else:
raise
# add this account creation to password history
# NOTE, this will be a NOP unless the feature has been turned on in configuration
password_history_entry = PasswordHistory()
password_history_entry.create(user)
registration.register(user)
profile_fields = [
"name", "level_of_education", "gender", "mailing_address", "city", "country", "goals",
"year_of_birth"
]
profile = UserProfile(
user=user,
**{key: form.cleaned_data.get(key) for key in profile_fields}
)
extended_profile = form.cleaned_extended_profile
if extended_profile:
profile.meta = json.dumps(extended_profile)
try:
profile.save()
except Exception: # pylint: disable=broad-except
log.exception("UserProfile creation failed for user {id}.".format(id=user.id))
raise
return (user, profile, registration)
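# Illustrative usage (mirrors the auto_auth flow further below, with hypothetical values):
#
#   form = AccountCreationForm(
#       data={'username': 'jane', 'email': 'jane@example.com', 'password': 'pw', 'name': 'Jane'},
#       tos_required=False,
#   )
#   user, profile, registration = _do_create_account(form)  # raises on invalid or duplicate data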
def create_account_with_params(request, params):
"""
Given a request and a dict of parameters (which may or may not have come
from the request), create an account for the requesting user, including
creating a comments service user object and sending an activation email.
This also takes external/third-party auth into account, updates that as
necessary, and authenticates the user for the request's session.
Does not return anything.
Raises AccountValidationError if an account with the username or email
specified by params already exists, or ValidationError if any of the given
parameters is invalid for any other reason.
Issues with this code:
* It is not transactional. If there is a failure part-way, an incomplete
account will be created and left in the database.
* Third-party auth passwords are not verified. There is a comment that
they are unused, but it would be helpful to have a sanity check that
they are sane.
* It is over 300 lines long (!) and includes disparate functionality, from
registration e-mails to all sorts of other things. It should be broken
up into semantically meaningful functions.
* The user-facing text is rather unfriendly (e.g. "Username must be a
minimum of two characters long" rather than "Please use a username of
at least two characters").
"""
# Copy params so we can modify it; we can't just do dict(params) because if
# params is request.POST, that results in a dict containing lists of values
params = dict(params.items())
# allow for microsites to define their own set of required/optional/hidden fields
extra_fields = microsite.get_value(
'REGISTRATION_EXTRA_FIELDS',
getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
)
# Boolean of whether a 3rd party auth provider and credentials were provided in
# the API so the newly created account can link with the 3rd party account.
#
# Note: this is orthogonal to the 3rd party authentication pipeline that occurs
# when the account is created via the browser and redirect URLs.
should_link_with_social_auth = third_party_auth.is_enabled() and 'provider' in params
if should_link_with_social_auth or (third_party_auth.is_enabled() and pipeline.running(request)):
params["password"] = pipeline.make_random_password()
# if doing signup for an external authorization, then get email, password, name from the eamap
# don't use the ones from the form, since the user could have hacked those
# unless originally we didn't get a valid email or name from the external auth
# TODO: We do not check whether these values meet all necessary criteria, such as email length
do_external_auth = 'ExternalAuthMap' in request.session
if do_external_auth:
eamap = request.session['ExternalAuthMap']
try:
validate_email(eamap.external_email)
params["email"] = eamap.external_email
except ValidationError:
pass
if eamap.external_name.strip() != '':
params["name"] = eamap.external_name
params["password"] = eamap.internal_password
log.debug(u'In create_account with external_auth: user = %s, email=%s', params["name"], params["email"])
extended_profile_fields = microsite.get_value('extended_profile_fields', [])
enforce_password_policy = (
settings.FEATURES.get("ENFORCE_PASSWORD_POLICY", False) and
not do_external_auth
)
# Can't have terms of service for certain SHIB users, like at Stanford
tos_required = (
not settings.FEATURES.get("AUTH_USE_SHIB") or
not settings.FEATURES.get("SHIB_DISABLE_TOS") or
not do_external_auth or
not eamap.external_domain.startswith(
external_auth.views.SHIBBOLETH_DOMAIN_PREFIX
)
)
form = AccountCreationForm(
data=params,
extra_fields=extra_fields,
extended_profile_fields=extended_profile_fields,
enforce_username_neq_password=True,
enforce_password_policy=enforce_password_policy,
tos_required=tos_required,
)
# Perform operations within a transaction that are critical to account creation
with transaction.commit_on_success():
# first, create the account
(user, profile, registration) = _do_create_account(form)
# next, link the account with social auth, if provided
if should_link_with_social_auth:
request.social_strategy = social_utils.load_strategy(backend=params['provider'], request=request)
social_access_token = params.get('access_token')
if not social_access_token:
raise ValidationError({
'access_token': [
_("An access_token is required when passing value ({}) for provider.").format(
params['provider']
)
]
})
request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_REGISTER_API
pipeline_user = None
error_message = ""
try:
pipeline_user = request.social_strategy.backend.do_auth(social_access_token, user=user)
except AuthAlreadyAssociated:
error_message = _("The provided access_token is already associated with another user.")
except (HTTPError, AuthException):
error_message = _("The provided access_token is not valid.")
if not pipeline_user or not isinstance(pipeline_user, User):
# Ensure user does not re-enter the pipeline
request.social_strategy.clean_partial_pipeline()
raise ValidationError({'access_token': [error_message]})
# Perform operations that are non-critical parts of account creation
preferences_api.set_user_preference(user, LANGUAGE_KEY, get_language())
if settings.FEATURES.get('ENABLE_DISCUSSION_EMAIL_DIGEST'):
try:
enable_notifications(user)
except Exception:
log.exception("Enable discussion notifications failed for user {id}.".format(id=user.id))
dog_stats_api.increment("common.student.account_created")
# Track the user's registration
if settings.FEATURES.get('SEGMENT_IO_LMS') and hasattr(settings, 'SEGMENT_IO_LMS_KEY'):
tracking_context = tracker.get_tracker().resolve_context()
analytics.identify(user.id, {
'email': user.email,
'username': user.username,
})
# If the user is registering via 3rd party auth, track which provider they use
provider_name = None
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
current_provider = provider.Registry.get_by_backend_name(running_pipeline.get('backend'))
provider_name = current_provider.NAME
analytics.track(
user.id,
"edx.bi.user.account.registered",
{
'category': 'conversion',
'label': params.get('course_id'),
'provider': provider_name
},
context={
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
create_comments_service_user(user)
context = {
'name': profile.name,
'key': registration.activation_key,
}
# composes activation email
subject = render_to_string('emails/activation_email_subject.txt', context)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message = render_to_string('emails/activation_email.txt', context)
# Don't send email if we are:
#
# 1. Doing load testing.
# 2. Random user generation for other forms of testing.
# 3. External auth bypassing activation.
# 4. Have the platform configured to not require e-mail activation.
#
# Note that this feature is only tested as a flag set one way or
# the other for *new* systems. we need to be careful about
# changing settings on a running system to make sure no users are
# left in an inconsistent state (or doing a migration if they are).
send_email = (
not settings.FEATURES.get('SKIP_EMAIL_VALIDATION', None) and
not settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING') and
not (do_external_auth and settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'))
)
if send_email:
from_address = microsite.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
try:
if settings.FEATURES.get('REROUTE_ACTIVATION_EMAIL'):
dest_addr = settings.FEATURES['REROUTE_ACTIVATION_EMAIL']
message = ("Activation for %s (%s): %s\n" % (user, user.email, profile.name) +
'-' * 80 + '\n\n' + message)
mail.send_mail(subject, message, from_address, [dest_addr], fail_silently=False)
else:
user.email_user(subject, message, from_address)
except Exception: # pylint: disable=broad-except
log.error(u'Unable to send activation email to user from "%s"', from_address, exc_info=True)
else:
registration.activate()
# Immediately after a user creates an account, we log them in. They are only
# logged in until they close the browser. They can't log in again until they click
# the activation link from the email.
new_user = authenticate(username=user.username, password=params['password'])
login(request, new_user)
request.session.set_expiry(0)
# TODO: there is no error checking here to see that the user actually logged in successfully,
# and is not yet an active user.
if new_user is not None:
AUDIT_LOG.info(u"Login success on new account creation - {0}".format(new_user.username))
if do_external_auth:
eamap.user = new_user
eamap.dtsignup = datetime.datetime.now(UTC)
eamap.save()
AUDIT_LOG.info(u"User registered with external_auth %s", new_user.username)
AUDIT_LOG.info(u'Updated ExternalAuthMap for %s to be %s', new_user.username, eamap)
if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'):
log.info('bypassing activation email')
new_user.is_active = True
new_user.save()
AUDIT_LOG.info(u"Login activated on extauth account - {0} ({1})".format(new_user.username, new_user.email))
def set_marketing_cookie(request, response):
"""
Set the login cookie for the edx marketing site on the given response. Its
expiration will match that of the given request's session.
"""
if request.session.get_expire_at_browser_close():
max_age = None
expires = None
else:
max_age = request.session.get_expiry_age()
expires_time = time.time() + max_age
expires = cookie_date(expires_time)
# we want this cookie to be accessed via javascript
# so httponly is set to None
response.set_cookie(
settings.EDXMKTG_COOKIE_NAME,
'true',
max_age=max_age,
expires=expires,
domain=settings.SESSION_COOKIE_DOMAIN,
path='/',
secure=None,
httponly=None
)
@csrf_exempt
def create_account(request, post_override=None):
"""
JSON call to create new edX account.
Used by the form in signup_modal.html, which is included in navigation.html.
"""
warnings.warn("Please use RegistrationView instead.", DeprecationWarning)
try:
create_account_with_params(request, post_override or request.POST)
except AccountValidationError as exc:
return JsonResponse({'success': False, 'value': exc.message, 'field': exc.field}, status=400)
except ValidationError as exc:
field, error_list = next(exc.message_dict.iteritems())
return JsonResponse(
{
"success": False,
"field": field,
"value": error_list[0],
},
status=400
)
redirect_url = try_change_enrollment(request)
# Resume the third-party-auth pipeline if necessary.
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
redirect_url = pipeline.get_complete_url(running_pipeline['backend'])
response = JsonResponse({
'success': True,
'redirect_url': redirect_url,
})
set_marketing_cookie(request, response)
return response
def auto_auth(request):
"""
Create or configure a user account, then log in as that user.
Enabled only when
settings.FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] is true.
Accepts the following querystring parameters:
* `username`, `email`, and `password` for the user account
* `full_name` for the user profile (the user's full name; defaults to the username)
* `staff`: Set to "true" to make the user global staff.
* `course_id`: Enroll the student in the course with `course_id`
* `roles`: Comma-separated list of roles to grant the student in the course with `course_id`
* `no_login`: Define this to create the user but not login
If username, email, or password are not provided, use
randomly generated credentials.
"""
# Generate a unique name to use if none provided
unique_name = uuid.uuid4().hex[0:30]
# Use the params from the request, otherwise use these defaults
username = request.GET.get('username', unique_name)
password = request.GET.get('password', unique_name)
email = request.GET.get('email', unique_name + "@example.com")
full_name = request.GET.get('full_name', username)
is_staff = request.GET.get('staff', None)
course_id = request.GET.get('course_id', None)
course_key = None
if course_id:
course_key = CourseLocator.from_string(course_id)
role_names = [v.strip() for v in request.GET.get('roles', '').split(',') if v.strip()]
login_when_done = 'no_login' not in request.GET
form = AccountCreationForm(
data={
'username': username,
'email': email,
'password': password,
'name': full_name,
},
tos_required=False
)
# Attempt to create the account.
# If successful, this will return a tuple containing
# the new user object.
try:
user, profile, reg = _do_create_account(form)
except AccountValidationError:
# Attempt to retrieve the existing user.
user = User.objects.get(username=username)
user.email = email
user.set_password(password)
user.save()
profile = UserProfile.objects.get(user=user)
reg = Registration.objects.get(user=user)
# Set the user's global staff bit
if is_staff is not None:
user.is_staff = (is_staff == "true")
user.save()
# Activate the user
reg.activate()
reg.save()
# ensure parental consent threshold is met
year = datetime.date.today().year
age_limit = settings.PARENTAL_CONSENT_AGE_LIMIT
profile.year_of_birth = (year - age_limit) - 1
profile.save()
# Enroll the user in a course
if course_key is not None:
CourseEnrollment.enroll(user, course_key)
# Apply the roles
for role_name in role_names:
role = Role.objects.get(name=role_name, course_id=course_key)
user.roles.add(role)
# Log in as the user
if login_when_done:
user = authenticate(username=username, password=password)
login(request, user)
create_comments_service_user(user)
# Provide the user with a valid CSRF token
# then return a 200 response
success_msg = u"{} user {} ({}) with password {} and user_id {}".format(
u"Logged in" if login_when_done else "Created",
username, email, password, user.id
)
response = HttpResponse(success_msg)
response.set_cookie('csrftoken', csrf(request)['csrf_token'])
return response
@ensure_csrf_cookie
def activate_account(request, key):
"""When link in activation e-mail is clicked"""
regs = Registration.objects.filter(activation_key=key)
if len(regs) == 1:
user_logged_in = request.user.is_authenticated()
already_active = True
if not regs[0].user.is_active:
regs[0].activate()
already_active = False
# Enroll student in any pending courses he/she may have if auto_enroll flag is set
student = User.objects.filter(id=regs[0].user_id)
if student:
ceas = CourseEnrollmentAllowed.objects.filter(email=student[0].email)
for cea in ceas:
if cea.auto_enroll:
CourseEnrollment.enroll(student[0], cea.course_id)
# enroll student in any pending CCXs he/she may have if auto_enroll flag is set
if settings.FEATURES.get('CUSTOM_COURSES_EDX'):
from ccx.models import CcxMembership, CcxFutureMembership
ccxfms = CcxFutureMembership.objects.filter(
email=student[0].email
)
for ccxfm in ccxfms:
if ccxfm.auto_enroll:
CcxMembership.auto_enroll(student[0], ccxfm)
resp = render_to_response(
"registration/activation_complete.html",
{
'user_logged_in': user_logged_in,
'already_active': already_active
}
)
return resp
if len(regs) == 0:
return render_to_response(
"registration/activation_invalid.html",
{'csrf': csrf(request)['csrf_token']}
)
return HttpResponseServerError(_("Unknown error. Please e-mail us to let us know how it happened."))
@csrf_exempt
@require_POST
def password_reset(request):
""" Attempts to send a password reset e-mail. """
# Add some rate limiting here by re-using the RateLimitMixin as a helper class
limiter = BadRequestRateLimiter()
if limiter.is_rate_limit_exceeded(request):
AUDIT_LOG.warning("Rate limit exceeded in password_reset")
return HttpResponseForbidden()
form = PasswordResetFormNoActive(request.POST)
if form.is_valid():
form.save(use_https=request.is_secure(),
from_email=settings.DEFAULT_FROM_EMAIL,
request=request,
domain_override=request.get_host())
# When password change is complete, an "edx.user.settings.changed" event will be emitted.
# But because changing the password is multi-step, we also emit an event here so that we can
# track where the request was initiated.
tracker.emit(
SETTING_CHANGE_INITIATED,
{
"setting": "password",
"old": None,
"new": None,
"user_id": request.user.id,
}
)
else:
# bad user? tick the rate limiter counter
AUDIT_LOG.info("Bad password_reset user passed in.")
limiter.tick_bad_request_counter(request)
return JsonResponse({
'success': True,
'value': render_to_string('registration/password_reset_done.html', {}),
})
def password_reset_confirm_wrapper(
request,
uidb36=None,
token=None,
):
""" A wrapper around django.contrib.auth.views.password_reset_confirm.
Needed because we want to set the user as active at this step.
"""
# cribbed from django.contrib.auth.views.password_reset_confirm
try:
uid_int = base36_to_int(uidb36)
user = User.objects.get(id=uid_int)
user.is_active = True
user.save()
except (ValueError, User.DoesNotExist):
pass
# tie in password strength enforcement as an optional level of
# security protection
err_msg = None
if request.method == 'POST':
password = request.POST['new_password1']
if settings.FEATURES.get('ENFORCE_PASSWORD_POLICY', False):
try:
validate_password_length(password)
validate_password_complexity(password)
validate_password_dictionary(password)
except ValidationError as err:
err_msg = _('Password: ') + '; '.join(err.messages)
# also, check the password reuse policy
if not PasswordHistory.is_allowable_password_reuse(user, password):
if user.is_staff:
num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE']
else:
num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE']
err_msg = ungettext(
"You are re-using a password that you have used recently. You must have {num} distinct password before reusing a previous password.",
"You are re-using a password that you have used recently. You must have {num} distinct passwords before reusing a previous password.",
num_distinct
).format(num=num_distinct)
# also, check to see if passwords are getting reset too frequently
if PasswordHistory.is_password_reset_too_soon(user):
num_days = settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS']
err_msg = ungettext(
"You are resetting passwords too frequently. Due to security policies, {num} day must elapse between password resets.",
"You are resetting passwords too frequently. Due to security policies, {num} days must elapse between password resets.",
num_days
).format(num=num_days)
if err_msg:
# We have a password reset attempt which violates some security policy; use the
# existing Django template to communicate this back to the user
context = {
'validlink': True,
'form': None,
'title': _('Password reset unsuccessful'),
'err_msg': err_msg,
'platform_name': settings.PLATFORM_NAME,
}
return TemplateResponse(request, 'registration/password_reset_confirm.html', context)
else:
# we also want to pass settings.PLATFORM_NAME in as extra_context
extra_context = {"platform_name": settings.PLATFORM_NAME}
if request.method == 'POST':
# remember what the old password hash is before we call down
old_password_hash = user.password
result = password_reset_confirm(
request, uidb36=uidb36, token=token, extra_context=extra_context
)
# get the updated user
updated_user = User.objects.get(id=uid_int)
# did the password hash change, if so record it in the PasswordHistory
if updated_user.password != old_password_hash:
entry = PasswordHistory()
entry.create(updated_user)
return result
else:
return password_reset_confirm(
request, uidb36=uidb36, token=token, extra_context=extra_context
)
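# Illustrative note: the uidb36 path component is the base-36 encoded user id, decoded
# above with base36_to_int; for example base36_to_int('3c') == 3 * 36 + 12 == 120.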
def reactivation_email_for_user(user):
try:
reg = Registration.objects.get(user=user)
except Registration.DoesNotExist:
return JsonResponse({
"success": False,
"error": _('No inactive user with this e-mail exists'),
}) # TODO: this should be status code 400 # pylint: disable=fixme
context = {
'name': user.profile.name,
'key': reg.activation_key,
}
subject = render_to_string('emails/activation_email_subject.txt', context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/activation_email.txt', context)
try:
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
except Exception: # pylint: disable=broad-except
log.error(u'Unable to send reactivation email from "%s"', settings.DEFAULT_FROM_EMAIL, exc_info=True)
return JsonResponse({
"success": False,
"error": _('Unable to send reactivation email')
}) # TODO: this should be status code 500 # pylint: disable=fixme
return JsonResponse({"success": True})
# TODO: delete this method and redirect unit tests to validate_new_email and do_email_change_request
# after accounts page work is done.
@ensure_csrf_cookie
def change_email_request(request):
""" AJAX call from the profile page. User wants a new e-mail.
"""
## Make sure it checks for existing e-mail conflicts
if not request.user.is_authenticated():
raise Http404
user = request.user
if not user.check_password(request.POST['password']):
return JsonResponse({
"success": False,
"error": _('Invalid password'),
}) # TODO: this should be status code 400 # pylint: disable=fixme
new_email = request.POST['new_email']
try:
validate_new_email(request.user, new_email)
do_email_change_request(request.user, new_email)
except ValueError as err:
return JsonResponse({
"success": False,
"error": err.message,
})
return JsonResponse({"success": True})
def validate_new_email(user, new_email):
"""
Given a new email for a user, does some basic verification of the new address. If any issues are
encountered with verification, a ValueError will be thrown.
"""
try:
validate_email(new_email)
except ValidationError:
raise ValueError(_('Valid e-mail address required.'))
if new_email == user.email:
raise ValueError(_('Old email is the same as the new email.'))
if User.objects.filter(email=new_email).count() != 0:
raise ValueError(_('An account with this e-mail already exists.'))
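# Illustrative usage: the helper raises ValueError for an invalid, unchanged, or
# already-registered address, and returns None when the new address is acceptable.
#
#   try:
#       validate_new_email(request.user, 'new@example.com')
#   except ValueError as err:
#       ...  # surface err.message back to the caller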
def do_email_change_request(user, new_email, activation_key=None):
"""
Given a new email for a user, does some basic verification of the new address and sends an activation message
to the new address. If any issues are encountered with verification or sending the message, a ValueError will
be thrown.
"""
pec_list = PendingEmailChange.objects.filter(user=user)
if len(pec_list) == 0:
pec = PendingEmailChange()
pec.user = user
else:
pec = pec_list[0]
# if activation_key is not passed as an argument, generate a random key
if not activation_key:
activation_key = uuid.uuid4().hex
pec.new_email = new_email
pec.activation_key = activation_key
pec.save()
context = {
'key': pec.activation_key,
'old_email': user.email,
'new_email': pec.new_email
}
subject = render_to_string('emails/email_change_subject.txt', context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/email_change.txt', context)
from_address = microsite.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
try:
mail.send_mail(subject, message, from_address, [pec.new_email])
except Exception: # pylint: disable=broad-except
log.error(u'Unable to send email activation link to user from "%s"', from_address, exc_info=True)
raise ValueError(_('Unable to send email activation link. Please try again later.'))
# When the email address change is complete, an "edx.user.settings.changed" event will be emitted.
# But because changing the email address is multi-step, we also emit an event here so that we can
# track where the request was initiated.
tracker.emit(
SETTING_CHANGE_INITIATED,
{
"setting": "email",
"old": context['old_email'],
"new": context['new_email'],
"user_id": user.id,
}
)
@ensure_csrf_cookie
@transaction.commit_manually
def confirm_email_change(request, key): # pylint: disable=unused-argument
"""
User requested a new e-mail. This is called when the activation
link is clicked. We confirm with the old e-mail, and update the address.
"""
try:
try:
pec = PendingEmailChange.objects.get(activation_key=key)
except PendingEmailChange.DoesNotExist:
response = render_to_response("invalid_email_key.html", {})
transaction.rollback()
return response
user = pec.user
address_context = {
'old_email': user.email,
'new_email': pec.new_email
}
if len(User.objects.filter(email=pec.new_email)) != 0:
response = render_to_response("email_exists.html", {})
transaction.rollback()
return response
subject = render_to_string('emails/email_change_subject.txt', address_context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/confirm_email_change.txt', address_context)
u_prof = UserProfile.objects.get(user=user)
meta = u_prof.get_meta()
if 'old_emails' not in meta:
meta['old_emails'] = []
meta['old_emails'].append([user.email, datetime.datetime.now(UTC).isoformat()])
u_prof.set_meta(meta)
u_prof.save()
# Send it to the old email...
try:
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
except Exception: # pylint: disable=broad-except
log.warning('Unable to send confirmation email to old address', exc_info=True)
response = render_to_response("email_change_failed.html", {'email': user.email})
transaction.rollback()
return response
user.email = pec.new_email
user.save()
pec.delete()
# And send it to the new email...
try:
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
except Exception: # pylint: disable=broad-except
log.warning('Unable to send confirmation email to new address', exc_info=True)
response = render_to_response("email_change_failed.html", {'email': pec.new_email})
transaction.rollback()
return response
response = render_to_response("email_change_successful.html", address_context)
transaction.commit()
return response
except Exception: # pylint: disable=broad-except
# If we get an unexpected exception, be sure to rollback the transaction
transaction.rollback()
raise
# TODO: DELETE AFTER NEW ACCOUNT PAGE DONE
@ensure_csrf_cookie
@require_POST
def change_name_request(request):
""" Log a request for a new name. """
if not request.user.is_authenticated():
raise Http404
try:
pnc = PendingNameChange.objects.get(user=request.user.id)
except PendingNameChange.DoesNotExist:
pnc = PendingNameChange()
pnc.user = request.user
pnc.new_name = request.POST['new_name'].strip()
pnc.rationale = request.POST['rationale']
if len(pnc.new_name) < 2:
return JsonResponse({
"success": False,
"error": _('Name required'),
}) # TODO: this should be status code 400 # pylint: disable=fixme
pnc.save()
# The following automatically accepts name change requests. Remove this to
# go back to the old system where it gets queued up for admin approval.
accept_name_change_by_id(pnc.id)
return JsonResponse({"success": True})
# TODO: DELETE AFTER NEW ACCOUNT PAGE DONE
def accept_name_change_by_id(uid):
"""
Accepts the pending name change request for the user represented
by user id `uid`.
"""
try:
pnc = PendingNameChange.objects.get(id=uid)
except PendingNameChange.DoesNotExist:
return JsonResponse({
"success": False,
"error": _('Invalid ID'),
}) # TODO: this should be status code 400 # pylint: disable=fixme
user = pnc.user
u_prof = UserProfile.objects.get(user=user)
# Save old name
meta = u_prof.get_meta()
if 'old_names' not in meta:
meta['old_names'] = []
meta['old_names'].append([u_prof.name, pnc.rationale, datetime.datetime.now(UTC).isoformat()])
u_prof.set_meta(meta)
u_prof.name = pnc.new_name
u_prof.save()
pnc.delete()
return JsonResponse({"success": True})
@require_POST
@login_required
@ensure_csrf_cookie
def change_email_settings(request):
"""Modify logged-in user's setting for receiving emails from a course."""
user = request.user
course_id = request.POST.get("course_id")
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
receive_emails = request.POST.get("receive_emails")
if receive_emails:
optout_object = Optout.objects.filter(user=user, course_id=course_key)
if optout_object:
optout_object.delete()
log.info(
u"User %s (%s) opted in to receive emails from course %s",
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Common codegen classes.
from collections import defaultdict
import operator
import re
import string
import textwrap
import functools
from WebIDL import (
BuiltinTypes,
IDLBuiltinType,
IDLNullValue,
IDLType,
IDLInterfaceMember,
IDLUndefinedValue,
)
from Configuration import (
MemberIsUnforgeable,
getTypesFromCallback,
getTypesFromDescriptor,
getTypesFromDictionary,
)
AUTOGENERATED_WARNING_COMMENT = \
"/* THIS FILE IS AUTOGENERATED - DO NOT EDIT */\n\n"
FINALIZE_HOOK_NAME = '_finalize'
TRACE_HOOK_NAME = '_trace'
CONSTRUCT_HOOK_NAME = '_constructor'
HASINSTANCE_HOOK_NAME = '_hasInstance'
def replaceFileIfChanged(filename, newContents):
"""
Read a copy of the old file, so that we don't touch it if it hasn't changed.
Returns True if the file was updated, false otherwise.
"""
# XXXjdm This doesn't play well with make right now.
# Force the file to always be updated, or else changing CodegenRust.py
# will cause many autogenerated bindings to be regenerated perpetually
# until the result is actually different.
# oldFileContents = ""
# try:
# with open(filename, 'rb') as oldFile:
# oldFileContents = ''.join(oldFile.readlines())
# except:
# pass
# if newContents == oldFileContents:
# return False
with open(filename, 'wb') as f:
f.write(newContents)
return True
def toStringBool(arg):
return str(not not arg).lower()
def toBindingNamespace(arg):
return re.sub("((_workers)?$)", "Binding\\1", arg)
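# Usage sketch (hypothetical interface names), relying on the Python 2
# re.sub semantics this module targets:
#     toBindingNamespace("Node")          # -> "NodeBinding"
#     toBindingNamespace("Node_workers")  # -> "NodeBinding_workers"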
def stripTrailingWhitespace(text):
tail = '\n' if text.endswith('\n') else ''
lines = text.splitlines()
for i in range(len(lines)):
lines[i] = lines[i].rstrip()
return '\n'.join(lines) + tail
def MakeNativeName(name):
return name[0].upper() + name[1:]
builtinNames = {
IDLType.Tags.bool: 'bool',
IDLType.Tags.int8: 'i8',
IDLType.Tags.int16: 'i16',
IDLType.Tags.int32: 'i32',
IDLType.Tags.int64: 'i64',
IDLType.Tags.uint8: 'u8',
IDLType.Tags.uint16: 'u16',
IDLType.Tags.uint32: 'u32',
IDLType.Tags.uint64: 'u64',
IDLType.Tags.unrestricted_float: 'f32',
IDLType.Tags.float: 'Finite<f32>',
IDLType.Tags.unrestricted_double: 'f64',
IDLType.Tags.double: 'Finite<f64>'
}
numericTags = [
IDLType.Tags.int8, IDLType.Tags.uint8,
IDLType.Tags.int16, IDLType.Tags.uint16,
IDLType.Tags.int32, IDLType.Tags.uint32,
IDLType.Tags.int64, IDLType.Tags.uint64,
IDLType.Tags.unrestricted_float,
IDLType.Tags.unrestricted_double
]
def unwrapCastableObject(descriptor, source, codeOnFailure, conversionFunction):
"""
A function for unwrapping an object named by the "source" argument
based on the passed-in descriptor. Returns the string of the Rust expression of
the appropriate type.
codeOnFailure is the code to run if unwrapping fails.
"""
args = {
"failureCode": CGIndenter(CGGeneric(codeOnFailure), 8).define(),
"function": conversionFunction,
"source": source,
}
return """\
match %(function)s(%(source)s) {
Ok(val) => val,
Err(()) => {
%(failureCode)s
}
}""" % args
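# For illustration only (assumed arguments): a call such as
#     unwrapCastableObject(descriptor, "${val}", "return false;", "root_from_handlevalue")
# yields a Rust match expression of roughly this shape, with the failure code
# indented inside the Err arm:
#     match root_from_handlevalue(${val}) {
#         Ok(val) => val,
#         Err(()) => {
#             return false;
#         }
#     }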
# We'll want to insert the indent at the beginnings of lines, but we
# don't want to indent empty lines. So only indent lines that have a
# non-newline character on them.
lineStartDetector = re.compile("^(?=[^\n#])", re.MULTILINE)
def indent(s, indentLevel=2):
"""
Indent C++ code.
Weird secret feature: this doesn't indent lines that start with # (such as
#include lines or #ifdef/#endif).
"""
if s == "":
return s
return re.sub(lineStartDetector, indentLevel * " ", s)
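# Usage sketch (hypothetical input): blank lines are left alone, while other
# lines gain the requested number of leading spaces:
#     indent("foo();\n\nbar();\n", 4)
#     # -> "    foo();\n\n    bar();\n"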
# dedent() and fill() are often called on the same string multiple
# times. We want to memoize their return values so we don't keep
# recomputing them all the time.
def memoize(fn):
"""
Decorator to memoize a function of one argument. The cache just
grows without bound.
"""
cache = {}
@functools.wraps(fn)
def wrapper(arg):
retval = cache.get(arg)
if retval is None:
retval = cache[arg] = fn(arg)
return retval
return wrapper
@memoize
def dedent(s):
"""
Remove all leading whitespace from s, and remove a blank line
at the beginning.
"""
if s.startswith('\n'):
s = s[1:]
return textwrap.dedent(s)
# This works by transforming the fill()-template to an equivalent
# string.Template.
fill_multiline_substitution_re = re.compile(r"( *)\$\*{(\w+)}(\n)?")
@memoize
def compile_fill_template(template):
"""
Helper function for fill(). Given the template string passed to fill(),
do the reusable part of template processing and return a pair (t,
argModList) that can be used every time fill() is called with that
template argument.
argsModList is list of tuples that represent modifications to be
made to args. Each modification has, in order: i) the arg name,
ii) the modified name, iii) the indent depth.
"""
t = dedent(template)
assert t.endswith("\n") or "\n" not in t
argModList = []
def replace(match):
"""
Replaces a line like ' $*{xyz}\n' with '${xyz_n}',
where n is the indent depth, and add a corresponding entry to
argModList.
Note that this needs to close over argModList, so it has to be
defined inside compile_fill_template().
"""
indentation, name, nl = match.groups()
depth = len(indentation)
# Check that $*{xyz} appears by itself on a line.
prev = match.string[:match.start()]
if (prev and not prev.endswith("\n")) or nl is None:
raise ValueError("Invalid fill() template: $*{%s} must appear by itself on a line" % name)
# Now replace this whole line of template with the indented equivalent.
modified_name = name + "_" + str(depth)
argModList.append((name, modified_name, depth))
return "${" + modified_name + "}"
t = re.sub(fill_multiline_substitution_re, replace, t)
return (string.Template(t), argModList)
def fill(template, **args):
"""
Convenience function for filling in a multiline template.
`fill(template, name1=v1, name2=v2)` is a lot like
`string.Template(template).substitute({"name1": v1, "name2": v2})`.
However, it's shorter, and has a few nice features:
* If `template` is indented, fill() automatically dedents it!
This makes code using fill() with Python's multiline strings
much nicer to look at.
* If `template` starts with a blank line, fill() strips it off.
(Again, convenient with multiline strings.)
* fill() recognizes a special kind of substitution
of the form `$*{name}`.
Use this to paste in, and automatically indent, multiple lines.
(Mnemonic: The `*` is for "multiple lines").
A `$*` substitution must appear by itself on a line, with optional
preceding indentation (spaces only). The whole line is replaced by the
corresponding keyword argument, indented appropriately. If the
argument is an empty string, no output is generated, not even a blank
line.
"""
t, argModList = compile_fill_template(template)
# Now apply argModList to args
for (name, modified_name, depth) in argModList:
if not (args[name] == "" or args[name].endswith("\n")):
raise ValueError("Argument %s with value %r is missing a newline" % (name, args[name]))
args[modified_name] = indent(args[name], depth)
return t.substitute(args)
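# A minimal sketch of fill() in action (hypothetical template and values).
# The $*{body} line is replaced by the `body` argument, indented to the same
# depth as the $* marker:
#     fill("if ${cond} {\n    $*{body}\n}\n",
#          cond="x > 0",
#          body="foo();\nbar();\n")
#     # -> "if x > 0 {\n    foo();\n    bar();\n}\n"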
class CGThing():
"""
Abstract base class for things that spit out code.
"""
def __init__(self):
pass # Nothing for now
def define(self):
"""Produce code for a Rust file."""
raise NotImplementedError # Override me!
class CGNativePropertyHooks(CGThing):
"""
Generate a NativePropertyHooks for a given descriptor
"""
def __init__(self, descriptor, properties):
CGThing.__init__(self)
self.descriptor = descriptor
self.properties = properties
def define(self):
parent = self.descriptor.interface.parent
if parent:
parentHooks = ("Some(&::dom::bindings::codegen::Bindings::%sBinding::sNativePropertyHooks)"
% parent.identifier.name)
else:
parentHooks = "None"
substitutions = {
"parentHooks": parentHooks
}
return string.Template(
"pub static sNativePropertyHooks: NativePropertyHooks = NativePropertyHooks {\n"
" native_properties: &sNativeProperties,\n"
" proto_hooks: ${parentHooks},\n"
"};\n").substitute(substitutions)
class CGMethodCall(CGThing):
"""
A class to generate selection of a method signature from a set of
signatures and generation of a call to that signature.
"""
def __init__(self, argsPre, nativeMethodName, static, descriptor, method):
CGThing.__init__(self)
methodName = '\\"%s.%s\\"' % (descriptor.interface.identifier.name, method.identifier.name)
def requiredArgCount(signature):
arguments = signature[1]
if len(arguments) == 0:
return 0
requiredArgs = len(arguments)
while requiredArgs and arguments[requiredArgs - 1].optional:
requiredArgs -= 1
return requiredArgs
signatures = method.signatures()
def getPerSignatureCall(signature, argConversionStartsAt=0):
signatureIndex = signatures.index(signature)
return CGPerSignatureCall(signature[0], argsPre, signature[1],
nativeMethodName + '_' * signatureIndex,
static, descriptor,
method, argConversionStartsAt)
if len(signatures) == 1:
# Special case: we can just do a per-signature method call
# here for our one signature and not worry about switching
# on anything.
signature = signatures[0]
self.cgRoot = CGList([getPerSignatureCall(signature)])
requiredArgs = requiredArgCount(signature)
if requiredArgs > 0:
code = (
"if argc < %d {\n"
" throw_type_error(cx, \"Not enough arguments to %s.\");\n"
" return false;\n"
"}" % (requiredArgs, methodName))
self.cgRoot.prepend(
CGWrapper(CGGeneric(code), pre="\n", post="\n"))
return
# Need to find the right overload
maxArgCount = method.maxArgCount
allowedArgCounts = method.allowedArgCounts
argCountCases = []
for argCount in allowedArgCounts:
possibleSignatures = method.signaturesForArgCount(argCount)
if len(possibleSignatures) == 1:
# easy case!
signature = possibleSignatures[0]
argCountCases.append(CGCase(str(argCount), getPerSignatureCall(signature)))
continue
distinguishingIndex = method.distinguishingIndexForArgCount(argCount)
# We can't handle unions at the distinguishing index.
for (returnType, args) in possibleSignatures:
if args[distinguishingIndex].type.isUnion():
raise TypeError("No support for unions as distinguishing "
"arguments yet: %s" %
args[distinguishingIndex].location)
# Convert all our arguments up to the distinguishing index.
# Doesn't matter which of the possible signatures we use, since
# they all have the same types up to that point; just use
# possibleSignatures[0]
caseBody = [
CGArgumentConverter(possibleSignatures[0][1][i],
i, "args", "argc", descriptor)
for i in range(0, distinguishingIndex)]
# Select the right overload from our set.
distinguishingArg = "args.get(%d)" % distinguishingIndex
def pickFirstSignature(condition, filterLambda):
sigs = filter(filterLambda, possibleSignatures)
assert len(sigs) < 2
if len(sigs) > 0:
call = getPerSignatureCall(sigs[0], distinguishingIndex)
if condition is None:
caseBody.append(call)
else:
caseBody.append(CGGeneric("if " + condition + " {"))
caseBody.append(CGIndenter(call))
caseBody.append(CGGeneric("}"))
return True
return False
# First check for null or undefined
pickFirstSignature("%s.isNullOrUndefined()" % distinguishingArg,
lambda s: (s[1][distinguishingIndex].type.nullable() or
s[1][distinguishingIndex].type.isDictionary()))
# Now check for distinguishingArg being an object that implements a
# non-callback interface. That includes typed arrays and
# arraybuffers.
interfacesSigs = [
s for s in possibleSignatures
if (s[1][distinguishingIndex].type.isObject() or
s[1][distinguishingIndex].type.isNonCallbackInterface())]
# There might be more than one of these; we need to check
# which ones we unwrap to.
if len(interfacesSigs) > 0:
# The spec says that we should check for "platform objects
# implementing an interface", but it's enough to guard on these
# being an object. The code for unwrapping non-callback
# interfaces and typed arrays will just bail out and move on to
# the next overload if the object fails to unwrap correctly. We
# could even not do the isObject() check up front here, but in
# cases where we have multiple object overloads it makes sense
# to do it only once instead of for each overload. That will
# also allow the unwrapping test to skip having to do codegen
# for the null-or-undefined case, which we already handled
# above.
caseBody.append(CGGeneric("if %s.get().is_object() {" %
(distinguishingArg)))
for idx, sig in enumerate(interfacesSigs):
caseBody.append(CGIndenter(CGGeneric("loop {")))
type = sig[1][distinguishingIndex].type
# The argument at index distinguishingIndex can't possibly
# be unset here, because we've already checked that argc is
# large enough that we can examine this argument.
info = getJSToNativeConversionInfo(
type, descriptor, failureCode="break;", isDefinitelyObject=True)
template = info.template
declType = info.declType
testCode = instantiateJSToNativeConversionTemplate(
template,
{"val": distinguishingArg},
declType,
"arg%d" % distinguishingIndex)
# Indent by 4, since we need to indent further than our "do" statement
caseBody.append(CGIndenter(testCode, 4))
# If we got this far, we know we unwrapped to the right
# interface, so just do the call. Start conversion with
# distinguishingIndex + 1, since we already converted
# distinguishingIndex.
caseBody.append(CGIndenter(
getPerSignatureCall(sig, distinguishingIndex + 1), 4))
caseBody.append(CGIndenter(CGGeneric("}")))
caseBody.append(CGGeneric("}"))
# XXXbz Now we're supposed to check for distinguishingArg being
# an array or a platform object that supports indexed
# properties... skip that last for now. It's a bit of a pain.
pickFirstSignature("%s.get().isObject() && IsArrayLike(cx, &%s.get().toObject())" %
(distinguishingArg, distinguishingArg),
lambda s:
(s[1][distinguishingIndex].type.isArray() or
s[1][distinguishingIndex].type.isSequence() or
s[1][distinguishingIndex].type.isObject()))
# Check for Date objects
# XXXbz Do we need to worry about security wrappers around the Date?
pickFirstSignature("%s.get().isObject() && JS_ObjectIsDate(cx, &%s.get().toObject())" %
(distinguishingArg, distinguishingArg),
lambda s: (s[1][distinguishingIndex].type.isDate() or
s[1][distinguishingIndex].type.isObject()))
# Check for vanilla JS objects
# XXXbz Do we need to worry about security wrappers?
pickFirstSignature("%s.get().is_object() && !is_platform_object(%s.get().to_object())" %
(distinguishingArg, distinguishingArg),
lambda s: (s[1][distinguishingIndex].type.isCallback() or
s[1][distinguishingIndex].type.isCallbackInterface() or
s[1][distinguishingIndex].type.isDictionary() or
s[1][distinguishingIndex].type.isObject()))
# The remaining cases are mutually exclusive. The
# pickFirstSignature calls are what change caseBody
# Check for strings or enums
if pickFirstSignature(None,
lambda s: (s[1][distinguishingIndex].type.isString() or
s[1][distinguishingIndex].type.isEnum())):
pass
# Check for primitives
elif pickFirstSignature(None,
lambda s: s[1][distinguishingIndex].type.isPrimitive()):
pass
# Check for "any"
elif pickFirstSignature(None,
lambda s: s[1][distinguishingIndex].type.isAny()):
pass
else:
# Just throw; we have no idea what we're supposed to
# do with this.
caseBody.append(CGGeneric("return Throw(cx, NS_ERROR_XPC_BAD_CONVERT_JS);"))
argCountCases.append(CGCase(str(argCount),
CGList(caseBody, "\n")))
overloadCGThings = []
overloadCGThings.append(
CGGeneric("let argcount = cmp::min(argc, %d);" %
maxArgCount))
overloadCGThings.append(
CGSwitch("argcount",
argCountCases,
CGGeneric("throw_type_error(cx, \"Not enough arguments to %s.\");\n"
"return false;" % methodName)))
# XXXjdm Avoid unreachable statement warnings
# overloadCGThings.append(
# CGGeneric('panic!("We have an always-returning default case");\n'
# 'return false;'))
self.cgRoot = CGWrapper(CGList(overloadCGThings, "\n"),
pre="\n")
def define(self):
return self.cgRoot.define()
def dictionaryHasSequenceMember(dictionary):
return (any(typeIsSequenceOrHasSequenceMember(m.type) for m in
dictionary.members) or
(dictionary.parent and
dictionaryHasSequenceMember(dictionary.parent)))
def typeIsSequenceOrHasSequenceMember(type):
if type.nullable():
type = type.inner
if type.isSequence():
return True
if type.isArray():
elementType = type.inner
return typeIsSequenceOrHasSequenceMember(elementType)
if type.isDictionary():
return dictionaryHasSequenceMember(type.inner)
if type.isUnion():
return any(typeIsSequenceOrHasSequenceMember(m.type) for m in
type.flatMemberTypes)
return False
def typeNeedsRooting(type, descriptorProvider):
return (type.isGeckoInterface() and
descriptorProvider.getDescriptor(type.unroll().inner.identifier.name).needsRooting)
def union_native_type(t):
name = t.unroll().name
return 'UnionTypes::%s' % name
class JSToNativeConversionInfo():
"""
An object representing information about a JS-to-native conversion.
"""
def __init__(self, template, default=None, declType=None,
needsRooting=False):
"""
template: A string representing the conversion code. This will have
template substitution performed on it as follows:
${val} is a handle to the JS::Value in question
default: A string or None representing Rust code for the default value (if any).
declType: A CGThing representing the native Rust type we're converting
to. This is allowed to be None if the conversion code is
supposed to be used as-is.
needsRooting: A boolean indicating whether the caller has to root
the result
"""
assert isinstance(template, str)
assert declType is None or isinstance(declType, CGThing)
self.template = template
self.default = default
self.declType = declType
self.needsRooting = needsRooting
def getJSToNativeConversionInfo(type, descriptorProvider, failureCode=None,
isDefinitelyObject=False,
isMember=False,
isArgument=False,
invalidEnumValueFatal=True,
defaultValue=None,
treatNullAs="Default",
isEnforceRange=False,
isClamp=False,
exceptionCode=None,
allowTreatNonObjectAsNull=False,
isCallbackReturnValue=False,
sourceDescription="value"):
"""
Get a template for converting a JS value to a native object based on the
given type and descriptor. If failureCode is given, then we're actually
testing whether we can convert the argument to the desired type. That
means that failures to convert due to the JS value being the wrong type of
value need to use failureCode instead of throwing exceptions. Failures to
convert that are due to JS exceptions (from toString or valueOf methods) or
out of memory conditions need to throw exceptions no matter what
failureCode is.
If isDefinitelyObject is True, that means we know the value
isObject() and we have no need to recheck that.
if isMember is True, we're being converted from a property of some
JS object, not from an actual method argument, so we can't rely on
our jsval being rooted or outliving us in any way. Any caller
passing true needs to ensure that it is handled correctly in
typeIsSequenceOrHasSequenceMember.
invalidEnumValueFatal controls whether an invalid enum value conversion
attempt will throw (if true) or simply return without doing anything (if
false).
If defaultValue is not None, it's the IDL default value for this conversion
If isEnforceRange is true, we're converting an integer and throwing if the
value is out of range.
If isClamp is true, we're converting an integer and clamping if the
value is out of range.
If allowTreatNonObjectAsNull is true, then [TreatNonObjectAsNull]
extended attributes on nullable callback functions will be honored.
The return value from this function is a JSToNativeConversionInfo object consisting of four things:
1) A string representing the conversion code. This will have template
substitution performed on it as follows:
${val} replaced by an expression for the JS::Value in question
2) A string or None representing Rust code for the default value (if any).
3) A CGThing representing the native Rust type we're converting to
(declType). This is allowed to be None if the conversion code is
supposed to be used as-is.
4) A boolean indicating whether the caller has to root the result.
"""
# We should not have a defaultValue if we know we're an object
assert not isDefinitelyObject or defaultValue is None
# If exceptionCode is not set, we'll just rethrow the exception we got.
# Note that we can't just set failureCode to exceptionCode, because setting
# failureCode will prevent pending exceptions from being set in cases when
# they really should be!
if exceptionCode is None:
exceptionCode = "return false;"
needsRooting = typeNeedsRooting(type, descriptorProvider)
def handleOptional(template, declType, default):
assert (defaultValue is None) == (default is None)
return JSToNativeConversionInfo(template, default, declType, needsRooting=needsRooting)
# Unfortunately, .capitalize() on a string will lowercase things inside the
# string, which we do not want.
def firstCap(string):
return string[0].upper() + string[1:]
# Helper functions for dealing with failures due to the JS value being the
# wrong type of value.
def onFailureNotAnObject(failureCode):
return CGWrapper(
CGGeneric(
failureCode or
('throw_type_error(cx, "%s is not an object.");\n'
'%s' % (firstCap(sourceDescription), exceptionCode))),
post="\n")
def onFailureNotCallable(failureCode):
return CGWrapper(
CGGeneric(
failureCode or
('throw_type_error(cx, \"%s is not callable.\");\n'
'%s' % (firstCap(sourceDescription), exceptionCode))))
# A helper function for handling null default values. Checks that the
# default value, if it exists, is null.
def handleDefaultNull(nullValue):
if defaultValue is None:
return None
if not isinstance(defaultValue, IDLNullValue):
raise TypeError("Can't handle non-null default value here")
assert type.nullable() or type.isDictionary()
return nullValue
# A helper function for wrapping up the template body for
# possibly-nullable objecty stuff
def wrapObjectTemplate(templateBody, nullValue, isDefinitelyObject, type,
failureCode=None):
if not isDefinitelyObject:
# Handle the non-object cases by wrapping up the whole
# thing in an if cascade.
templateBody = (
"if ${val}.get().is_object() {\n" +
CGIndenter(CGGeneric(templateBody)).define() + "\n")
if type.nullable():
templateBody += (
"} else if ${val}.get().is_null_or_undefined() {\n"
" %s\n") % nullValue
templateBody += (
"} else {\n" +
CGIndenter(onFailureNotAnObject(failureCode)).define() +
"}")
return templateBody
assert not (isEnforceRange and isClamp) # These are mutually exclusive
if type.isArray():
raise TypeError("Can't handle array arguments yet")
if type.isSequence():
raise TypeError("Can't handle sequence arguments yet")
if type.isUnion():
declType = CGGeneric(union_native_type(type))
if type.nullable():
declType = CGWrapper(declType, pre="Option<", post=" >")
templateBody = ("match FromJSValConvertible::from_jsval(cx, ${val}, ()) {\n"
" Ok(value) => value,\n"
" Err(()) => { %s },\n"
"}" % exceptionCode)
return handleOptional(templateBody, declType, handleDefaultNull("None"))
if type.isGeckoInterface():
assert not isEnforceRange and not isClamp
descriptor = descriptorProvider.getDescriptor(
type.unroll().inner.identifier.name)
if descriptor.interface.isCallback():
name = descriptor.nativeType
declType = CGWrapper(CGGeneric(name), pre="Rc<", post=">")
template = "%s::new(${val}.get().to_object())" % name
if type.nullable():
declType = CGWrapper(declType, pre="Option<", post=">")
template = wrapObjectTemplate("Some(%s)" % template, "None",
isDefinitelyObject, type,
failureCode)
return handleOptional(template, declType, handleDefaultNull("None"))
conversionFunction = "root_from_handlevalue"
descriptorType = descriptor.returnType
if isMember == "Variadic":
conversionFunction = "native_from_handlevalue"
descriptorType = descriptor.nativeType
elif isArgument:
descriptorType = descriptor.argumentType
templateBody = ""
if descriptor.interface.isConsequential():
raise TypeError("Consequential interface %s being used as an "
"argument" % descriptor.interface.identifier.name)
if failureCode is None:
substitutions = {
"sourceDescription": sourceDescription,
"interface": descriptor.interface.identifier.name,
"exceptionCode": exceptionCode,
}
unwrapFailureCode = string.Template(
'throw_type_error(cx, "${sourceDescription} does not '
'implement interface ${interface}.");\n'
'${exceptionCode}').substitute(substitutions)
else:
unwrapFailureCode = failureCode
templateBody = unwrapCastableObject(
descriptor, "${val}", unwrapFailureCode, conversionFunction)
declType = CGGeneric(descriptorType)
if type.nullable():
templateBody = "Some(%s)" % templateBody
declType = CGWrapper(declType, pre="Option<", post=">")
templateBody = wrapObjectTemplate(templateBody, "None",
isDefinitelyObject, type, failureCode)
return handleOptional(templateBody, declType, handleDefaultNull("None"))
if type.isSpiderMonkeyInterface():
raise TypeError("Can't handle SpiderMonkey interface arguments yet")
if type.isDOMString():
assert not isEnforceRange and not isClamp
treatAs = {
"Default": "StringificationBehavior::Default",
"EmptyString": "StringificationBehavior::Empty",
}
if treatNullAs not in treatAs:
raise TypeError("We don't support [TreatNullAs=%s]" % treatNullAs)
if type.nullable():
# Note: the actual behavior passed here doesn't matter for nullable
# strings.
nullBehavior = "StringificationBehavior::Default"
else:
nullBehavior = treatAs[treatNullAs]
conversionCode = (
"match FromJSValConvertible::from_jsval(cx, ${val}, %s) {\n"
" Ok(strval) => strval,\n"
" Err(_) => { %s },\n"
"}" % (nullBehavior, exceptionCode))
if defaultValue is None:
default = None
elif isinstance(defaultValue, IDLNullValue):
assert type.nullable()
default = "None"
else:
assert defaultValue.type.tag() == IDLType.Tags.domstring
default = 'DOMString::from("%s")' % defaultValue.value
if type.nullable():
default = "Some(%s)" % default
declType = "DOMString"
if type.nullable():
declType = "Option<%s>" % declType
return handleOptional(conversionCode, CGGeneric(declType), default)
if type.isUSVString():
assert not isEnforceRange and not isClamp
conversionCode = (
"match FromJSValConvertible::from_jsval(cx, ${val}, ()) {\n"
" Ok(strval) => strval,\n"
" Err(_) => { %s },\n"
"}" % exceptionCode)
if defaultValue is None:
default = None
elif isinstance(defaultValue, IDLNullValue):
assert type.nullable()
default = "None"
else:
assert defaultValue.type.tag() in (IDLType.Tags.domstring, IDLType.Tags.usvstring)
default = 'USVString("%s".to_owned())' % defaultValue.value
if type.nullable():
default = "Some(%s)" % default
declType = "USVString"
if type.nullable():
declType = "Option<%s>" % declType
return handleOptional(conversionCode, CGGeneric(declType), default)
if type.isByteString():
assert not isEnforceRange and not isClamp
conversionCode = (
"match FromJSValConvertible::from_jsval(cx, ${val}, ()) {\n"
" Ok(strval) => strval,\n"
" Err(_) => { %s },\n"
"}" % exceptionCode)
declType = CGGeneric("ByteString")
if type.nullable():
declType = CGWrapper(declType, pre="Option<", post=">")
return handleOptional(conversionCode, declType, handleDefaultNull("None"))
if type.isEnum():
assert not isEnforceRange and not isClamp
if type.nullable():
raise TypeError("We don't support nullable enumerated arguments "
"yet")
enum = type.inner.identifier.name
if invalidEnumValueFatal:
handleInvalidEnumValueCode = exceptionCode
else:
handleInvalidEnumValueCode = "return true;"
template = (
"match find_enum_string_index(cx, ${val}, %(values)s) {\n"
" Err(_) => { %(exceptionCode)s },\n"
" Ok(None) => { %(handleInvalidEnumValueCode)s },\n"
" Ok(Some(index)) => {\n"
" //XXXjdm need some range checks up in here.\n"
" mem::transmute(index)\n"
" },\n"
"}" % {"values": enum + "Values::strings",
"exceptionCode": exceptionCode,
"handleInvalidEnumValueCode": handleInvalidEnumValueCode})
if defaultValue is not None:
assert defaultValue.type.tag() == IDLType.Tags.domstring
default = "%s::%s" % (enum, getEnumValueName(defaultValue.value))
else:
default = None
return handleOptional(template, CGGeneric(enum), default)
if type.isCallback():
assert not isEnforceRange and not isClamp
assert not type.treatNonCallableAsNull()
assert not type.treatNonObjectAsNull() or type.nullable()
assert not type.treatNonObjectAsNull() or not type.treatNonCallableAsNull()
callback = type.unroll().callback
declType = CGGeneric('%s::%s' % (callback.module(), callback.identifier.name))
finalDeclType = CGTemplatedType("Rc", declType)
conversion = CGCallbackTempRoot(declType.define())
if type.nullable():
declType = CGTemplatedType("Option", declType)
finalDeclType = CGTemplatedType("Option", finalDeclType)
conversion = CGWrapper(conversion, pre="Some(", post=")")
if allowTreatNonObjectAsNull and type.treatNonObjectAsNull():
if not isDefinitelyObject:
haveObject = "${val}.get().is_object()"
template = CGIfElseWrapper(haveObject,
conversion,
CGGeneric("None")).define()
else:
template = conversion
else:
template = CGIfElseWrapper("IsCallable(${val}.get().to_object())",
conversion,
onFailureNotCallable(failureCode)).define()
template = wrapObjectTemplate(
template,
"None",
isDefinitelyObject,
type,
failureCode)
if defaultValue is not None:
assert allowTreatNonObjectAsNull
assert type.treatNonObjectAsNull()
assert type.nullable()
assert isinstance(defaultValue, IDLNullValue)
default = "None"
else:
default = None
return JSToNativeConversionInfo(template, default, finalDeclType, needsRooting=needsRooting)
if type.isAny():
assert not isEnforceRange and not isClamp
declType = ""
default = ""
if isMember == "Dictionary":
# TODO: Need to properly root dictionaries
# https://github.com/servo/servo/issues/6381
declType = CGGeneric("JSVal")
if defaultValue is None:
default = None
elif isinstance(defaultValue, IDLNullValue):
default = "NullValue()"
elif isinstance(defaultValue, IDLUndefinedValue):
default = "UndefinedValue()"
else:
raise TypeError("Can't handle non-null, non-undefined default value here")
else:
declType = CGGeneric("HandleValue")
if defaultValue is None:
default = None
elif isinstance(defaultValue, IDLNullValue):
default = "HandleValue::null()"
elif isinstance(defaultValue, IDLUndefinedValue):
default = "HandleValue::undefined()"
else:
raise TypeError("Can't handle non-null, non-undefined default value here")
return handleOptional("${val}", declType, default)
if type.isObject():
assert not isEnforceRange and not isClamp
# TODO: Need to root somehow
# https://github.com/servo/servo/issues/6382
declType = CGGeneric("*mut JSObject")
templateBody = wrapObjectTemplate("${val}.get().to_object()",
"ptr::null_mut()",
isDefinitelyObject, type, failureCode)
return handleOptional(templateBody, declType,
handleDefaultNull("ptr::null_mut()"))
if type.isDictionary():
if failureCode is not None:
raise TypeError("Can't handle dictionaries when failureCode is not None")
# There are no nullable dictionaries
assert not type.nullable()
typeName = CGDictionary.makeDictionaryName(type.inner)
declType = CGGeneric(typeName)
template = ("match %s::new(cx, ${val}) {\n"
" Ok(dictionary) => dictionary,\n"
" Err(_) => return false,\n"
"}" % typeName)
return handleOptional(template, declType, handleDefaultNull("%s::empty(cx)" % typeName))
if type.isVoid():
# This one only happens for return values, and it's easy: just
# ignore the jsval.
return JSToNativeConversionInfo("", None, None, needsRooting=False)
if not type.isPrimitive():
raise TypeError("Need conversion for argument type '%s'" % str(type))
if type.isInteger():
if isEnforceRange:
conversionBehavior = "ConversionBehavior::EnforceRange"
elif isClamp:
conversionBehavior = "ConversionBehavior::Clamp"
else:
conversionBehavior = "ConversionBehavior::Default"
else:
assert not isEnforceRange and not isClamp
conversionBehavior = "()"
if failureCode is None:
failureCode = 'return false'
declType = CGGeneric(builtinNames[type.tag()])
if type.nullable():
declType = CGWrapper(declType, pre="Option<", post=">")
template = (
"match FromJSValConvertible::from_jsval(cx, ${val}, %s) {\n"
" Ok(v) => v,\n"
" Err(_) => { %s }\n"
"}" % (conversionBehavior, exceptionCode))
if defaultValue is not None:
if isinstance(defaultValue, IDLNullValue):
assert type.nullable()
defaultStr = "None"
else:
tag = defaultValue.type.tag()
if tag in [IDLType.Tags.float, IDLType.Tags.double]:
defaultStr = "Finite::wrap(%s)" % defaultValue.value
elif tag in numericTags:
defaultStr = str(defaultValue.value)
else:
assert tag == IDLType.Tags.bool
defaultStr = toStringBool(defaultValue.value)
if type.nullable():
defaultStr = "Some(%s)" % defaultStr
else:
defaultStr = None
return handleOptional(template, declType, defaultStr)
def instantiateJSToNativeConversionTemplate(templateBody, replacements,
declType, declName):
"""
Take the templateBody and declType as returned by
getJSToNativeConversionInfo, a set of replacements as required by the
strings in such a templateBody, and a declName, and generate code to
convert into a stack Rust binding with that name.
"""
result = CGList([], "\n")
conversion = CGGeneric(string.Template(templateBody).substitute(replacements))
if declType is not None:
newDecl = [
CGGeneric("let "),
CGGeneric(declName),
CGGeneric(": "),
declType,
CGGeneric(" = "),
conversion,
CGGeneric(";"),
]
result.append(CGList(newDecl))
else:
result.append(conversion)
# Add an empty CGGeneric to get an extra newline after the argument
# conversion.
result.append(CGGeneric(""))
return result
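# Rough shape of the generated code (assumed inputs): with declName "arg0",
# declType CGGeneric("u32"), and a ${val} replacement of "args.get(0)", the
# emitted Rust is approximately:
#     let arg0: u32 = match FromJSValConvertible::from_jsval(cx, args.get(0), ConversionBehavior::Default) {
#         Ok(v) => v,
#         Err(_) => { return false; }
#     };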
def convertConstIDLValueToJSVal(value):
if isinstance(value, IDLNullValue):
return "NullVal"
tag = value.type.tag()
if tag in [IDLType.Tags.int8, IDLType.Tags.uint8, IDLType.Tags.int16,
IDLType.Tags.uint16, IDLType.Tags.int32]:
return "IntVal(%s)" % (value.value)
if tag == IDLType.Tags.uint32:
return "UintVal(%s)" % (value.value)
if tag in [IDLType.Tags.int64, IDLType.Tags.uint64]:
return "DoubleVal(%s)" % (value.value)
if tag == IDLType.Tags.bool:
return "BoolVal(true)" if value.value else "BoolVal(false)"
if tag in [IDLType.Tags.unrestricted_float, IDLType.Tags.float,
IDLType.Tags.unrestricted_double, IDLType.Tags.double]:
return "DoubleVal(%s)" % (value.value)
raise TypeError("Const value of unhandled type: %s" % value.type)
class CGArgumentConverter(CGThing):
"""
A class that takes an IDL argument object, its index in the
argument list, and the args and argc strings, and generates code to
unwrap the argument to the right native type.
"""
def __init__(self, argument, index, args, argc, descriptorProvider,
invalidEnumValueFatal=True):
CGThing.__init__(self)
assert not argument.defaultValue or argument.optional
replacer = {
"index": index,
"argc": argc,
"args": args
}
replacementVariables = {
"val": string.Template("${args}.get(${index})").substitute(replacer),
}
info = getJSToNativeConversionInfo(
argument.type,
descriptorProvider,
invalidEnumValueFatal=invalidEnumValueFatal,
defaultValue=argument.defaultValue,
treatNullAs=argument.treatNullAs,
isEnforceRange=argument.enforceRange,
isClamp=argument.clamp,
isMember="Variadic" if argument.variadic else False,
allowTreatNonObjectAsNull=argument.allowTreatNonCallableAsNull())
template = info.template
default = info.default
declType = info.declType
if not argument.variadic:
if argument.optional:
condition = "{args}.get({index}).is_undefined()".format(**replacer)
if argument.defaultValue:
assert default
template = CGIfElseWrapper(condition,
CGGeneric(default),
CGGeneric(template)).define()
else:
assert not default
declType = CGWrapper(declType, pre="Option<", post=">")
template = CGIfElseWrapper(condition,
CGGeneric("None"),
CGGeneric("Some(%s)" % template)).define()
else:
assert not default
self.converter = instantiateJSToNativeConversionTemplate(
template, replacementVariables, declType, "arg%d" % index)
else:
assert argument.optional
variadicConversion = {
"val": string.Template("${args}.get(variadicArg)").substitute(replacer),
}
innerConverter = [instantiateJSToNativeConversionTemplate(
template, variadicConversion, declType, "slot")]
arg = "arg%d" % index
if argument.type.isGeckoInterface():
vec = "RootedVec::new()"
innerConverter.append(CGGeneric("%s.push(JS::from_ref(&*slot));" % arg))
else:
vec = "vec![]"
innerConverter.append(CGGeneric("%s.push(slot);" % arg))
inner = CGIndenter(CGList(innerConverter, "\n"), 8).define()
self.converter = CGGeneric("""\
let mut %(arg)s = %(vec)s;
if %(argc)s > %(index)s {
%(arg)s.reserve(%(argc)s as usize - %(index)s);
for variadicArg in %(index)s..%(argc)s {
%(inner)s
}
}""" % {'arg': arg, 'argc': argc, 'index': index, 'inner': inner, 'vec': vec})
def define(self):
return self.converter.define()
def wrapForType(jsvalRef, result='result', successCode='return true;', pre=''):
"""
Reflect a Rust value into JS.
* 'jsvalRef': a MutableHandleValue in which to store the result
of the conversion;
* 'result': the name of the variable in which the Rust value is stored;
* 'successCode': the code to run once we have done the conversion.
* 'pre': code to run before the conversion if rooting is necessary
"""
wrap = "%s\n(%s).to_jsval(cx, %s);" % (pre, result, jsvalRef)
if successCode:
wrap += "\n%s" % successCode
return wrap
def typeNeedsCx(type, retVal=False):
if type is None:
return False
if type.nullable():
type = type.inner
if type.isSequence() or type.isArray():
type = type.inner
if type.isUnion():
return any(typeNeedsCx(t) for t in type.unroll().flatMemberTypes)
if retVal and type.isSpiderMonkeyInterface():
return True
return type.isAny() or type.isObject()
# Returns a CGThing containing the type of the return value.
def getRetvalDeclarationForType(returnType, descriptorProvider):
if returnType is None or returnType.isVoid():
# Nothing to declare
return CGGeneric("()")
if returnType.isPrimitive() and returnType.tag() in builtinNames:
result = CGGeneric(builtinNames[returnType.tag()])
if returnType.nullable():
result = CGWrapper(result, pre="Option<", post=">")
return result
if returnType.isDOMString():
result = CGGeneric("DOMString")
if returnType.nullable():
result = CGWrapper(result, pre="Option<", post=">")
return result
if returnType.isUSVString():
result = CGGeneric("USVString")
if returnType.nullable():
result = CGWrapper(result, pre="Option<", post=">")
return result
if returnType.isByteString():
result = CGGeneric("ByteString")
if returnType.nullable():
result = CGWrapper(result, pre="Option<", post=">")
return result
if returnType.isEnum():
result = CGGeneric(returnType.unroll().inner.identifier.name)
if returnType.nullable():
result = CGWrapper(result, pre="Option<", post=">")
return result
if returnType.isGeckoInterface():
descriptor = descriptorProvider.getDescriptor(
returnType.unroll().inner.identifier.name)
result = CGGeneric(descriptor.returnType)
if returnType.nullable():
result = CGWrapper(result, pre="Option<", post=">")
return result
if returnType.isCallback():
callback = returnType.unroll().callback
result = CGGeneric('Rc<%s::%s>' % (callback.module(), callback.identifier.name))
if returnType.nullable():
result = CGWrapper(result, pre="Option<", post=">")
return result
if returnType.isUnion():
result = CGGeneric(union_native_type(returnType))
if returnType.nullable():
result = CGWrapper(result, pre="Option<", post=">")
return result
# TODO: Return the value through a MutableHandleValue outparam
# https://github.com/servo/servo/issues/6307
if returnType.isAny():
return CGGeneric("JSVal")
if returnType.isObject() or returnType.isSpiderMonkeyInterface():
return CGGeneric("*mut JSObject")
if returnType.isSequence():
inner = returnType.unroll()
result = getRetvalDeclarationForType(inner, descriptorProvider)
result = CGWrapper(result, pre="Vec<", post=">")
if returnType.nullable():
result = CGWrapper(result, pre="Option<", post=">")
return result
if returnType.isDictionary():
nullable = returnType.nullable()
dictName = returnType.inner.name if nullable else returnType.name
result = CGGeneric(dictName)
if typeNeedsRooting(returnType, descriptorProvider):
raise TypeError("We don't support rootable dictionary return values")
if nullable:
result = CGWrapper(result, pre="Option<", post=">")
return result
raise TypeError("Don't know how to declare return value for %s" %
returnType)
class PropertyDefiner:
"""
A common superclass for defining things on prototype objects.
Subclasses should implement generateArray to generate the actual arrays of
things we're defining. They should also set self.regular to the list of
things exposed to web pages.
"""
def __init__(self, descriptor, name):
self.descriptor = descriptor
self.name = name
def variableName(self):
return "s" + self.name
def length(self):
return len(self.regular)
def __str__(self):
# We only need to generate id arrays for things that will end
# up used via ResolveProperty or EnumerateProperties.
return self.generateArray(self.regular, self.variableName())
def generatePrefableArray(self, array, name, specTemplate, specTerminator,
specType, getDataTuple):
"""
This method generates our various arrays.
array is an array of interface members as passed to generateArray
name is the name as passed to generateArray
specTemplate is a template for each entry of the spec array
specTerminator is a terminator for the spec array (inserted at the end
of the array), or None
specType is the actual typename of our spec
getDataTuple is a callback function that takes an array entry and
returns a tuple suitable for substitution into specTemplate.
"""
assert len(array) != 0
specs = []
for member in array:
specs.append(specTemplate % getDataTuple(member))
if specTerminator:
specs.append(specTerminator)
return (("const %s: &'static [%s] = &[\n" +
",\n".join(specs) + "\n" +
"];\n") % (name, specType))
# The length of a method is the minimum of the lengths of the
# argument lists of all its overloads.
def methodLength(method):
signatures = method.signatures()
return min(
len([arg for arg in arguments if not arg.optional and not arg.variadic])
for (_, arguments) in signatures)
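# Example (hypothetical IDL): for an operation with the overloads
#     void f(long a);
#     void f(long a, optional long b, long... rest);
# the per-overload counts of required arguments are 1 and 1 (optional and
# variadic arguments are excluded), so methodLength returns 1.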
class MethodDefiner(PropertyDefiner):
"""
A class for defining methods on a prototype object.
"""
def __init__(self, descriptor, name, static, unforgeable):
assert not (static and unforgeable)
PropertyDefiner.__init__(self, descriptor, name)
# FIXME https://bugzilla.mozilla.org/show_bug.cgi?id=772822
# We should be able to check for special operations without an
# identifier. For now we check if the name starts with __
# Ignore non-static methods for callback interfaces
if not descriptor.interface.isCallback() or static:
methods = [m for m in descriptor.interface.members if
m.isMethod() and m.isStatic() == static and
not m.isIdentifierLess() and
MemberIsUnforgeable(m, descriptor) == unforgeable]
else:
methods = []
self.regular = [{"name": m.identifier.name,
"methodInfo": not m.isStatic(),
"length": methodLength(m)} for m in methods]
# FIXME Check for an existing iterator on the interface first.
if any(m.isGetter() and m.isIndexed() for m in methods):
self.regular.append({"name": '@@iterator',
"methodInfo": False,
"selfHostedName": "ArrayValues",
"length": 0})
isUnforgeableInterface = bool(descriptor.interface.getExtendedAttribute("Unforgeable"))
if not static and unforgeable == isUnforgeableInterface:
stringifier = descriptor.operations['Stringifier']
if stringifier:
self.regular.append({
"name": "toString",
"nativeName": stringifier.identifier.name,
"length": 0,
})
self.unforgeable = unforgeable
def generateArray(self, array, name):
if len(array) == 0:
return ""
flags = "JSPROP_ENUMERATE"
if self.unforgeable:
flags += " | JSPROP_PERMANENT | JSPROP_READONLY"
def specData(m):
# TODO: Use something like JS_FNSPEC
# https://github.com/servo/servo/issues/6391
if "selfHostedName" in m:
selfHostedName = '%s as *const u8 as *const libc::c_char' % str_to_const_array(m["selfHostedName"])
assert not m.get("methodInfo", True)
accessor = "None"
jitinfo = "0 as *const JSJitInfo"
else:
selfHostedName = "0 as *const libc::c_char"
if m.get("methodInfo", True):
identifier = m.get("nativeName", m["name"])
# Go through an intermediate type here, because it's not
# easy to tell whether the methodinfo is a JSJitInfo or
# a JSTypedMethodJitInfo here. The compiler knows, though,
# so let it do the work.
jitinfo = "&%s_methodinfo as *const _ as *const JSJitInfo" % identifier
accessor = "Some(generic_method)"
else:
jitinfo = "0 as *const JSJitInfo"
accessor = 'Some(%s)' % m.get("nativeName", m["name"])
if m["name"].startswith("@@"):
return ('(SymbolCode::%s as i32 + 1)'
% m["name"][2:], accessor, jitinfo, m["length"], flags, selfHostedName)
return (str_to_const_array(m["name"]), accessor, jitinfo, m["length"], flags, selfHostedName)
return self.generatePrefableArray(
array, name,
' JSFunctionSpec {\n'
' name: %s as *const u8 as *const libc::c_char,\n'
' call: JSNativeWrapper { op: %s, info: %s },\n'
' nargs: %s,\n'
' flags: (%s) as u16,\n'
' selfHostedName: %s\n'
' }',
' JSFunctionSpec {\n'
' name: 0 as *const libc::c_char,\n'
' call: JSNativeWrapper { op: None, info: 0 as *const JSJitInfo },\n'
' nargs: 0,\n'
' flags: 0,\n'
' selfHostedName: 0 as *const libc::c_char\n'
' }',
'JSFunctionSpec',
specData)
class AttrDefiner(PropertyDefiner):
def __init__(self, descriptor, name, static, unforgeable):
assert not (static and unforgeable)
PropertyDefiner.__init__(self, descriptor, name)
self.name = name
self.descriptor = descriptor
self.regular = [
m
for m in descriptor.interface.members if
m.isAttr() and m.isStatic() == static and
MemberIsUnforgeable(m, descriptor) == unforgeable
]
self.static = static
self.unforgeable = unforgeable
def generateArray(self, array, name):
if len(array) == 0:
return ""
flags = "JSPROP_ENUMERATE | JSPROP_SHARED"
if self.unforgeable:
flags += " | JSPROP_READONLY | JSPROP_PERMANENT"
def getter(attr):
if self.static:
accessor = 'get_' + self.descriptor.internalNameFor(attr.identifier.name)
jitinfo = "0 as *const JSJitInfo"
else:
if attr.hasLenientThis():
accessor = "generic_lenient_getter"
else:
accessor = "generic_getter"
jitinfo = "&%s_getterinfo" % self.descriptor.internalNameFor(attr.identifier.name)
return ("JSNativeWrapper { op: Some(%(native)s), info: %(info)s }"
% {"info": jitinfo,
"native": accessor})
def setter(attr):
if attr.readonly and not attr.getExtendedAttribute("PutForwards"):
return "JSNativeWrapper { op: None, info: 0 as *const JSJitInfo }"
if self.static:
accessor = 'set_' + self.descriptor.internalNameFor(attr.identifier.name)
jitinfo = "0 as *const JSJitInfo"
else:
if attr.hasLenientThis():
accessor = "generic_lenient_setter"
else:
accessor = "generic_setter"
jitinfo = "&%s_setterinfo" % self.descriptor.internalNameFor(attr.identifier.name)
return ("JSNativeWrapper { op: Some(%(native)s), info: %(info)s }"
% {"info": jitinfo,
"native": accessor})
def specData(attr):
return (str_to_const_array(attr.identifier.name), flags, getter(attr),
setter(attr))
return self.generatePrefableArray(
array, name,
' JSPropertySpec {\n'
' name: %s as *const u8 as *const libc::c_char,\n'
' flags: ((%s) & 0xFF) as u8,\n'
' getter: %s,\n'
' setter: %s\n'
' }',
' JSPropertySpec {\n'
' name: 0 as *const libc::c_char,\n'
' flags: 0,\n'
' getter: JSNativeWrapper { op: None, info: 0 as *const JSJitInfo },\n'
' setter: JSNativeWrapper { op: None, info: 0 as *const JSJitInfo }\n'
' }',
'JSPropertySpec',
specData)
class ConstDefiner(PropertyDefiner):
"""
A class for defining constants on the interface object
"""
def __init__(self, descriptor, name):
PropertyDefiner.__init__(self, descriptor, name)
self.name = name
self.regular = [m for m in descriptor.interface.members if m.isConst()]
def generateArray(self, array, name):
if len(array) == 0:
return ""
def specData(const):
return (str_to_const_array(const.identifier.name),
convertConstIDLValueToJSVal(const.value))
return self.generatePrefableArray(
array, name,
' ConstantSpec { name: %s, value: %s }',
None,
'ConstantSpec',
specData)
# We'll want to insert the indent at the beginnings of lines, but we
# don't want to indent empty lines. So only indent lines that have a
# non-newline character on them.
lineStartDetector = re.compile("^(?=[^\n])", re.MULTILINE)
class CGIndenter(CGThing):
"""
A class that takes another CGThing and generates code that indents that
CGThing by some number of spaces. The default indent is two spaces.
"""
def __init__(self, child, indentLevel=4):
CGThing.__init__(self)
self.child = child
self.indent = " " * indentLevel
def define(self):
defn = self.child.define()
if defn != "":
return re.sub(lineStartDetector, self.indent, defn)
else:
return defn
class CGWrapper(CGThing):
"""
Generic CGThing that wraps other CGThings with pre and post text.
"""
def __init__(self, child, pre="", post="", reindent=False):
CGThing.__init__(self)
self.child = child
self.pre = pre
self.post = post
self.reindent = reindent
def define(self):
defn = self.child.define()
if self.reindent:
# We don't use lineStartDetector because we don't want to
# insert whitespace at the beginning of our _first_ line.
defn = stripTrailingWhitespace(
defn.replace("\n", "\n" + (" " * len(self.pre))))
return self.pre + defn + self.post
class CGImports(CGWrapper):
"""
Generates the appropriate import/use statements.
"""
def __init__(self, child, descriptors, callbacks, imports, ignored_warnings=None):
"""
Adds a set of imports.
"""
if ignored_warnings is None:
ignored_warnings = [
'non_camel_case_types',
'non_upper_case_globals',
'unused_imports',
'unused_variables',
'unused_assignments',
]
def componentTypes(type):
if type.nullable():
type = type.unroll()
if type.isUnion():
return type.flatMemberTypes
return [type]
def isImportable(type):
if not type.isType():
assert type.isInterface()
return not type.isCallback()
return type.isNonCallbackInterface() and not type.builtin
def relatedTypesForSignatures(method):
types = []
for (returnType, arguments) in method.signatures():
types += componentTypes(returnType)
for arg in arguments:
types += componentTypes(arg.type)
return types
def getIdentifier(t):
if t.isType():
return t.inner.identifier
assert t.isInterface()
return t.identifier
types = []
for d in descriptors:
types += [d.interface]
members = d.interface.members + d.interface.namedConstructors
constructor = d.interface.ctor()
if constructor:
members += [constructor]
if d.proxy:
members += [o for o in d.operations.values() if o]
for m in members:
if m.isMethod():
types += relatedTypesForSignatures(m)
elif m.isAttr():
types += componentTypes(m.type)
for c in callbacks:
types += relatedTypesForSignatures(c)
imports += ['dom::types::%s' % getIdentifier(t).name for t in types if isImportable(t)]
statements = []
if len(ignored_warnings) > 0:
statements.append('#![allow(%s)]' % ','.join(ignored_warnings))
statements.extend('use %s;' % i for i in sorted(set(imports)))
CGWrapper.__init__(self, child,
pre='\n'.join(statements) + '\n\n')
class CGIfWrapper(CGWrapper):
def __init__(self, condition, child):
pre = CGWrapper(CGGeneric(condition), pre="if ", post=" {\n",
reindent=True)
CGWrapper.__init__(self, CGIndenter(child), pre=pre.define(),
post="\n}")
class CGTemplatedType(CGWrapper):
def __init__(self, templateName, child):
CGWrapper.__init__(self, child, pre=templateName + "<", post=">")
class CGNamespace(CGWrapper):
def __init__(self, namespace, child, public=False):
pre = "%smod %s {\n" % ("pub " if public else "", namespace)
post = "} // mod %s" % namespace
CGWrapper.__init__(self, child, pre=pre, post=post)
@staticmethod
def build(namespaces, child, public=False):
"""
Static helper method to build multiple wrapped namespaces.
"""
if not namespaces:
return child
inner = CGNamespace.build(namespaces[1:], child, public=public)
return CGNamespace(namespaces[0], inner, public=public)
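# Usage sketch (hypothetical module names): the wrappers nest from the
# outside in, so
#     CGNamespace.build(["foo", "bar"], CGGeneric("/* body */"), public=True)
# renders roughly as `pub mod foo { pub mod bar { /* body */ } }`.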
def DOMClassTypeId(desc):
protochain = desc.prototypeChain
inner = ""
if desc.hasDescendants():
if desc.interface.getExtendedAttribute("Abstract"):
return "::dom::bindings::codegen::InheritTypes::TopTypeId::Abstract"
name = desc.interface.identifier.name
inner = "(::dom::bindings::codegen::InheritTypes::%sTypeId::%s)" % (name, name)
elif len(protochain) == 1:
return "::dom::bindings::codegen::InheritTypes::TopTypeId::Alone"
reversed_protochain = list(reversed(protochain))
for (child, parent) in zip(reversed_protochain, reversed_protochain[1:]):
inner = "(::dom::bindings::codegen::InheritTypes::%sTypeId::%s%s)" % (parent, child, inner)
return "::dom::bindings::codegen::InheritTypes::TopTypeId::%s%s" % (protochain[0], inner)
def DOMClass(descriptor):
protoList = ['PrototypeList::ID::' + proto for proto in descriptor.prototypeChain]
# Pad out the list to the right length with ID::Count so we
# guarantee that all the lists are the same length. id::Count
# is never the ID of any prototype, so it's safe to use as
# padding.
protoList.extend(['PrototypeList::ID::Count'] * (descriptor.config.maxProtoChainLength - len(protoList)))
prototypeChainString = ', '.join(protoList)
heapSizeOf = 'heap_size_of_raw_self_and_children::<%s>' % descriptor.interface.identifier.name
return """\
DOMClass {
interface_chain: [ %s ],
native_hooks: &sNativePropertyHooks,
type_id: %s,
heap_size_of: %s as unsafe fn(_) -> _,
}""" % (prototypeChainString, DOMClassTypeId(descriptor), heapSizeOf)
class CGDOMJSClass(CGThing):
"""
Generate a DOMJSClass for a given descriptor
"""
def __init__(self, descriptor):
CGThing.__init__(self)
self.descriptor = descriptor
def define(self):
traceHook = 'Some(%s)' % TRACE_HOOK_NAME
if self.descriptor.isGlobal():
assert not self.descriptor.weakReferenceable
traceHook = "Some(js::jsapi::JS_GlobalObjectTraceHook)"
flags = "JSCLASS_IS_GLOBAL | JSCLASS_DOM_GLOBAL"
slots = "JSCLASS_GLOBAL_SLOT_COUNT + 1"
else:
flags = "0"
if self.descriptor.weakReferenceable:
slots = "2"
else:
slots = "1"
return """\
static Class: DOMJSClass = DOMJSClass {
base: js::jsapi::Class {
name: %s as *const u8 as *const libc::c_char,
flags: JSCLASS_IS_DOMJSCLASS | JSCLASS_IMPLEMENTS_BARRIERS | %s |
(((%s) & JSCLASS_RESERVED_SLOTS_MASK) <<
JSCLASS_RESERVED_SLOTS_SHIFT), //JSCLASS_HAS_RESERVED_SLOTS(%s),
addProperty: None,
delProperty: None,
getProperty: None,
setProperty: None,
enumerate: None,
resolve: None,
convert: None,
finalize: Some(%s),
call: None,
hasInstance: None,
construct: None,
trace: %s,
spec: js::jsapi::ClassSpec {
createConstructor: None,
createPrototype: None,
constructorFunctions: 0 as *const js::jsapi::JSFunctionSpec,
constructorProperties: 0 as *const js::jsapi::JSPropertySpec,
prototypeFunctions: 0 as *const js::jsapi::JSFunctionSpec,
prototypeProperties: 0 as *const js::jsapi::JSPropertySpec,
finishInit: None,
flags: 0,
},
ext: js::jsapi::ClassExtension {
outerObject: %s,
innerObject: None,
isWrappedNative: false,
weakmapKeyDelegateOp: None,
objectMovedOp: None,
},
ops: js::jsapi::ObjectOps {
lookupProperty: None,
defineProperty: None,
hasProperty: None,
getProperty: None,
setProperty: None,
getOwnPropertyDescriptor: None,
deleteProperty: None,
watch: None,
unwatch: None,
getElements: None,
enumerate: None,
thisObject: %s,
},
},
dom_class: %s
};""" % (str_to_const_array(self.descriptor.interface.identifier.name),
flags, slots, slots,
FINALIZE_HOOK_NAME, traceHook,
self.descriptor.outerObjectHook,
self.descriptor.outerObjectHook,
CGGeneric(DOMClass(self.descriptor)).define())
def str_to_const_array(s):
return "b\"%s\\0\"" % s
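# For example, str_to_const_array("Node") produces the literal `b"Node\0"`,
# a NUL-terminated byte string suitable for the *const libc::c_char name
# fields in the JSClass definitions here.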
class CGPrototypeJSClass(CGThing):
def __init__(self, descriptor):
CGThing.__init__(self)
self.descriptor = descriptor
def define(self):
name = str_to_const_array(self.descriptor.interface.identifier.name + "Prototype")
slotCount = 0
if self.descriptor.hasUnforgeableMembers:
slotCount += 1
return """\
static PrototypeClass: JSClass = JSClass {
name: %(name)s as *const u8 as *const libc::c_char,
flags:
// JSCLASS_HAS_RESERVED_SLOTS(%(slotCount)s)
(%(slotCount)s & JSCLASS_RESERVED_SLOTS_MASK) << JSCLASS_RESERVED_SLOTS_SHIFT,
addProperty: None,
delProperty: None,
getProperty: None,
setProperty: None,
enumerate: None,
resolve: None,
convert: None,
finalize: None,
call: None,
hasInstance: None,
construct: None,
trace: None,
reserved: [0 as *mut libc::c_void; 25]
};
""" % {'name': name, 'slotCount': slotCount}
class CGInterfaceObjectJSClass(CGThing):
def __init__(self, descriptor):
CGThing.__init__(self)
self.descriptor = descriptor
def define(self):
if True:
return ""
ctorname = "0 as *const u8" if not self.descriptor.interface.ctor() else CONSTRUCT_HOOK_NAME
hasinstance = HASINSTANCE_HOOK_NAME
return """\
const InterfaceObjectClass: JSClass = {
%s, 0,
JS_PropertyStub,
JS_PropertyStub,
JS_PropertyStub,
JS_StrictPropertyStub,
JS_EnumerateStub,
JS_ResolveStub,
JS_ConvertStub,
0 as *const u8,
0 as *const u8,
%s,
%s,
%s,
0 as *const u8,
JSCLASS_NO_INTERNAL_MEMBERS
};
""" % (str_to_const_array("Function"), ctorname, hasinstance, ctorname)
class CGList(CGThing):
"""
Generate code for a list of CGThings. Just concatenates them together, with
an optional joiner string. "\n" is a common joiner.
"""
def __init__(self, children, joiner=""):
CGThing.__init__(self)
self.children = children
self.joiner = joiner
def append(self, child):
self.children.append(child)
def prepend(self, child):
self.children.insert(0, child)
def join(self, generator):
return self.joiner.join(filter(lambda s: len(s) > 0, (child for child in generator)))
def define(self):
return self.join(child.define() for child in self.children if child is not None)
class CGIfElseWrapper(CGList):
def __init__(self, condition, ifTrue, ifFalse):
kids = [CGIfWrapper(condition, ifTrue),
CGWrapper(CGIndenter(ifFalse), pre=" else {\n", post="\n}")]
CGList.__init__(self, kids)
class CGGeneric(CGThing):
"""
A class that spits out a fixed string into the codegen. Can spit out a
separate string for the declaration too.
"""
def __init__(self, text):
self.text = text
def define(self):
return self.text
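# Illustrative sketch of how these building blocks compose (assuming nothing beyond
# the classes defined above): CGGeneric wraps a literal string and CGList joins the
# define() output of its children with a joiner, e.g.
#
#     >>> CGList([CGGeneric("fn a() {}"), CGGeneric("fn b() {}")], "\n").define()
#     'fn a() {}\nfn b() {}'
#
# None children and empty strings are skipped, so optional pieces of generated code
# can simply be passed through as None or CGGeneric("").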
class CGCallbackTempRoot(CGGeneric):
def __init__(self, name):
CGGeneric.__init__(self, "%s::new(${val}.get().to_object())" % name)
def getAllTypes(descriptors, dictionaries, callbacks):
"""
Generate all the types we're dealing with. For each type, a tuple
containing type, descriptor, dictionary is yielded. The
descriptor and dictionary can be None if the type does not come
from a descriptor or dictionary; they will never both be non-None.
"""
for d in descriptors:
for t in getTypesFromDescriptor(d):
yield (t, d, None)
for dictionary in dictionaries:
for t in getTypesFromDictionary(dictionary):
yield (t, None, dictionary)
for callback in callbacks:
for t in getTypesFromCallback(callback):
yield (t, None, None)
def SortedTuples(l):
"""
Sort a list of tuples based on the first item in the tuple
"""
return sorted(l, key=operator.itemgetter(0))
def SortedDictValues(d):
"""
Returns a list of values from the dict sorted by key.
"""
# Create a list of tuples containing key and value, sorted on key.
d = SortedTuples(d.items())
# We're only interested in the values.
return (i[1] for i in d)
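# Illustrative note: SortedDictValues gives a deterministic, key-sorted iteration
# order (used below so repeated codegen runs emit identical union output), e.g.
#
#     >>> list(SortedDictValues({"b": 2, "a": 1, "c": 3}))
#     [1, 2, 3]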
def UnionTypes(descriptors, dictionaries, callbacks, config):
"""
Returns a CGList containing CGUnionStructs for every union.
"""
imports = [
'dom::bindings::codegen::PrototypeList',
'dom::bindings::conversions::FromJSValConvertible',
'dom::bindings::conversions::ToJSValConvertible',
'dom::bindings::conversions::ConversionBehavior',
'dom::bindings::conversions::root_from_handlevalue',
'dom::bindings::conversions::StringificationBehavior',
'dom::bindings::error::throw_not_in_union',
'dom::bindings::js::Root',
'dom::bindings::str::USVString',
'dom::types::*',
'js::jsapi::JSContext',
'js::jsapi::{HandleValue, MutableHandleValue}',
'js::jsval::JSVal',
'util::str::DOMString',
]
# Now find all the things we'll need as arguments and return values because
# we need to wrap or unwrap them.
unionStructs = dict()
for (t, descriptor, dictionary) in getAllTypes(descriptors, dictionaries, callbacks):
assert not descriptor or not dictionary
t = t.unroll()
if not t.isUnion():
continue
name = str(t)
if name not in unionStructs:
provider = descriptor or config.getDescriptorProvider()
unionStructs[name] = CGList([
CGUnionStruct(t, provider),
CGUnionConversionStruct(t, provider)
])
return CGImports(CGList(SortedDictValues(unionStructs), "\n\n"), [], [], imports, ignored_warnings=[])
class Argument():
"""
A class for outputting the type and name of an argument
"""
def __init__(self, argType, name, default=None, mutable=False):
self.argType = argType
self.name = name
self.default = default
self.mutable = mutable
def declare(self):
string = ('mut ' if self.mutable else '') + self.name + ((': ' + self.argType) if self.argType else '')
# XXXjdm Support default arguments somehow :/
# if self.default is not None:
# string += " = " + self.default
return string
def define(self):
return self.argType + ' ' + self.name
class CGAbstractMethod(CGThing):
"""
An abstract class for generating code for a method. Subclasses
should override definition_body to create the actual code.
descriptor is the descriptor for the interface the method is associated with
name is the name of the method as a string
returnType is the IDLType of the return value
args is a list of Argument objects
inline should be True to generate an inline method, whose body is
part of the declaration.
alwaysInline should be True to generate an inline method annotated with
MOZ_ALWAYS_INLINE.
If templateArgs is not None it should be a list of strings containing
template arguments, and the function will be templatized using those
arguments.
docs is None or documentation for the method in a string.
"""
def __init__(self, descriptor, name, returnType, args, inline=False,
alwaysInline=False, extern=False, pub=False, templateArgs=None,
unsafe=False, docs=None):
CGThing.__init__(self)
self.descriptor = descriptor
self.name = name
self.returnType = returnType
self.args = args
self.alwaysInline = alwaysInline
self.extern = extern
self.templateArgs = templateArgs
self.pub = pub
self.unsafe = unsafe
self.docs = docs
def _argstring(self):
return ', '.join([a.declare() for a in self.args])
def _template(self):
if self.templateArgs is None:
return ''
return '<%s>\n' % ', '.join(self.templateArgs)
def _docs(self):
if self.docs is None:
return ''
lines = self.docs.splitlines()
return ''.join('/// %s\n' % line for line in lines)
def _decorators(self):
decorators = []
if self.alwaysInline:
decorators.append('#[inline]')
if self.extern:
decorators.append('unsafe')
decorators.append('extern')
if self.pub:
decorators.append('pub')
if not decorators:
return ''
return ' '.join(decorators) + ' '
def _returnType(self):
return (" -> %s" % self.returnType) if self.returnType != "void" else ""
def define(self):
body = self.definition_body()
# Method will already be marked `unsafe` if `self.extern == True`
if self.unsafe and not self.extern:
body = CGWrapper(CGIndenter(body), pre="unsafe {\n", post="\n}")
return CGWrapper(CGIndenter(body),
pre=self.definition_prologue(),
post=self.definition_epilogue()).define()
def definition_prologue(self):
return "%s%sfn %s%s(%s)%s {\n" % (self._docs(), self._decorators(),
self.name, self._template(),
self._argstring(), self._returnType())
def definition_epilogue(self):
return "\n}\n"
def definition_body(self):
raise NotImplementedError # Override me!
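# A rough sketch of what CGAbstractMethod assembles (the argument values here are
# hypothetical, chosen only for illustration): a subclass supplies definition_body(),
# and the constructor flags determine the surrounding Rust `fn` item. A method built
# with name='Foo', returnType='void', args=[Argument('*mut JSContext', 'cx')] and
# pub=True gets the prologue
#
#     pub fn Foo(cx: *mut JSContext) {
#
# with the body indented inside it (and additionally wrapped in `unsafe { ... }`
# when unsafe=True but extern=False), followed by the closing brace from
# definition_epilogue().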
def CreateBindingJSObject(descriptor, parent=None):
create = "let raw = Box::into_raw(object);\nlet _rt = RootedTraceable::new(&*raw);\n"
if descriptor.proxy:
assert not descriptor.isGlobal()
create += """
let handler = RegisterBindings::proxy_handlers[PrototypeList::Proxies::%s as usize];
let private = RootedValue::new(cx, PrivateValue(raw as *const libc::c_void));
let obj = NewProxyObject(cx, handler,
private.handle(),
proto.ptr, %s.get(),
ptr::null_mut(), ptr::null_mut());
assert!(!obj.is_null());
let obj = RootedObject::new(cx, obj);\
""" % (descriptor.name, parent)
elif descriptor.isGlobal():
create += ("let obj = RootedObject::new(\n"
" cx,\n"
" create_dom_global(\n"
" cx,\n"
" &Class.base as *const js::jsapi::Class as *const JSClass,\n"
" raw as *const libc::c_void,\n"
" Some(%s))\n"
");\n"
"assert!(!obj.ptr.is_null());" % TRACE_HOOK_NAME)
else:
create += ("let obj = RootedObject::new(cx, JS_NewObjectWithGivenProto(\n"
" cx, &Class.base as *const js::jsapi::Class as *const JSClass, proto.handle()));\n"
"assert!(!obj.ptr.is_null());\n"
"\n"
"JS_SetReservedSlot(obj.ptr, DOM_OBJECT_SLOT,\n"
" PrivateValue(raw as *const libc::c_void));")
if descriptor.weakReferenceable:
create += """
JS_SetReservedSlot(obj.ptr, DOM_WEAK_SLOT, PrivateValue(ptr::null()));"""
return create
def InitUnforgeablePropertiesOnHolder(descriptor, properties):
"""
Define the unforgeable properties on the unforgeable holder for
the interface represented by descriptor.
properties is a PropertyArrays instance.
"""
unforgeables = []
defineUnforgeableAttrs = "define_properties(cx, unforgeable_holder.handle(), %s).unwrap();"
defineUnforgeableMethods = "define_methods(cx, unforgeable_holder.handle(), %s).unwrap();"
unforgeableMembers = [
(defineUnforgeableAttrs, properties.unforgeable_attrs),
(defineUnforgeableMethods, properties.unforgeable_methods),
]
for template, array in unforgeableMembers:
if array.length() > 0:
unforgeables.append(CGGeneric(template % array.variableName()))
return CGList(unforgeables, "\n")
def CopyUnforgeablePropertiesToInstance(descriptor):
"""
Copy the unforgeable properties from the unforgeable holder for
this interface to the instance object we have.
"""
if not descriptor.hasUnforgeableMembers:
return ""
copyCode = ""
# For proxies, we want to define on the expando object, not directly on the
# reflector, so we can make sure we don't get confused by named getters.
if descriptor.proxy:
copyCode += """\
let expando = RootedObject::new(cx, ensure_expando_object(cx, obj.handle()));
"""
obj = "expando"
else:
obj = "obj"
# We can't do the fast copy for globals, because we can't allocate the
# unforgeable holder for those with the right JSClass. Luckily, there
# aren't too many globals being created.
if descriptor.isGlobal():
copyFunc = "JS_CopyPropertiesFrom"
else:
copyFunc = "JS_InitializePropertiesFromCompatibleNativeObject"
copyCode += """\
let mut unforgeable_holder = RootedObject::new(cx, ptr::null_mut());
unforgeable_holder.handle_mut().set(
JS_GetReservedSlot(proto.ptr, DOM_PROTO_UNFORGEABLE_HOLDER_SLOT).to_object());
assert!(%(copyFunc)s(cx, %(obj)s.handle(), unforgeable_holder.handle()));
""" % {'copyFunc': copyFunc, 'obj': obj}
return copyCode
class CGWrapMethod(CGAbstractMethod):
"""
Class that generates the FooBinding::Wrap function for non-callback
interfaces.
"""
def __init__(self, descriptor):
assert not descriptor.interface.isCallback()
if not descriptor.isGlobal():
args = [Argument('*mut JSContext', 'cx'), Argument('GlobalRef', 'scope'),
Argument("Box<%s>" % descriptor.concreteType, 'object')]
else:
args = [Argument('*mut JSContext', 'cx'),
Argument("Box<%s>" % descriptor.concreteType, 'object')]
retval = 'Root<%s>' % descriptor.concreteType
CGAbstractMethod.__init__(self, descriptor, 'Wrap', retval, args,
pub=True, unsafe=True)
def definition_body(self):
unforgeable = CopyUnforgeablePropertiesToInstance(self.descriptor)
if not self.descriptor.isGlobal():
create = CreateBindingJSObject(self.descriptor, "scope")
return CGGeneric("""\
let _ar = JSAutoRequest::new(cx);
let scope = scope.reflector().get_jsobject();
assert!(!scope.get().is_null());
assert!(((*JS_GetClass(scope.get())).flags & JSCLASS_IS_GLOBAL) != 0);
let mut proto = RootedObject::new(cx, ptr::null_mut());
let _ac = JSAutoCompartment::new(cx, scope.get());
GetProtoObject(cx, scope, scope, proto.handle_mut());
assert!(!proto.ptr.is_null());
%(createObject)s
%(copyUnforgeable)s
(*raw).init_reflector(obj.ptr);
Root::from_ref(&*raw)""" % {'copyUnforgeable': unforgeable, 'createObject': create})
else:
create = CreateBindingJSObject(self.descriptor)
return CGGeneric("""\
let _ar = JSAutoRequest::new(cx);
%(createObject)s
let _ac = JSAutoCompartment::new(cx, obj.ptr);
let mut proto = RootedObject::new(cx, ptr::null_mut());
GetProtoObject(cx, obj.handle(), obj.handle(), proto.handle_mut());
JS_SetPrototype(cx, obj.handle(), proto.handle());
%(copyUnforgeable)s
(*raw).init_reflector(obj.ptr);
let ret = Root::from_ref(&*raw);
RegisterBindings::Register(cx, obj.handle());
ret""" % {'copyUnforgeable': unforgeable, 'createObject': create})
class CGIDLInterface(CGThing):
"""
Class for codegen of an implementation of the IDLInterface trait.
"""
def __init__(self, descriptor):
CGThing.__init__(self)
self.descriptor = descriptor
def define(self):
interface = self.descriptor.interface
name = self.descriptor.name
if (interface.getUserData("hasConcreteDescendant", False) or
interface.getUserData("hasProxyDescendant", False)):
depth = len(self.descriptor.prototypeChain)
check = "class.interface_chain[%s] == PrototypeList::ID::%s" % (depth - 1, name)
elif self.descriptor.proxy:
check = "class as *const _ == &Class as *const _"
else:
check = "class as *const _ == &Class.dom_class as *const _"
return """\
impl IDLInterface for %(name)s {
#[inline]
fn derives(class: &'static DOMClass) -> bool {
%(check)s
}
}
""" % {'check': check, 'name': name}
class CGAbstractExternMethod(CGAbstractMethod):
"""
Abstract base class for codegen of implementation-only (no
declaration) static methods.
"""
def __init__(self, descriptor, name, returnType, args):
CGAbstractMethod.__init__(self, descriptor, name, returnType, args,
inline=False, extern=True)
class PropertyArrays():
def __init__(self, descriptor):
self.static_methods = MethodDefiner(descriptor, "StaticMethods",
static=True, unforgeable=False)
self.static_attrs = AttrDefiner(descriptor, "StaticAttributes",
static=True, unforgeable=False)
self.methods = MethodDefiner(descriptor, "Methods", static=False, unforgeable=False)
self.unforgeable_methods = MethodDefiner(descriptor, "UnforgeableMethods",
static=False, unforgeable=True)
self.attrs = AttrDefiner(descriptor, "Attributes", static=False, unforgeable=False)
self.unforgeable_attrs = AttrDefiner(descriptor, "UnforgeableAttributes",
static=False, unforgeable=True)
self.consts = ConstDefiner(descriptor, "Constants")
pass
@staticmethod
def arrayNames():
return [
"static_methods",
"static_attrs",
"methods",
"unforgeable_methods",
"attrs",
"unforgeable_attrs",
"consts",
]
def variableNames(self):
names = {}
for array in self.arrayNames():
names[array] = getattr(self, array).variableName()
return names
def __str__(self):
define = ""
for array in self.arrayNames():
define += str(getattr(self, array))
return define
class CGNativeProperties(CGThing):
def __init__(self, descriptor, properties):
CGThing.__init__(self)
self.properties = properties
def define(self):
def getField(array):
propertyArray = getattr(self.properties, array)
if propertyArray.length() > 0:
value = "Some(%s)" % propertyArray.variableName()
else:
value = "None"
return CGGeneric(string.Template('${name}: ${value},').substitute({
'name': array,
'value': value,
}))
nativeProps = CGList([getField(array) for array in self.properties.arrayNames()], '\n')
return CGWrapper(CGIndenter(nativeProps),
pre="static sNativeProperties: NativeProperties = NativeProperties {\n",
post="\n};\n").define()
class CGCreateInterfaceObjectsMethod(CGAbstractMethod):
"""
Generate the CreateInterfaceObjects method for an interface descriptor.
properties should be a PropertyArrays instance.
"""
def __init__(self, descriptor, properties):
args = [Argument('*mut JSContext', 'cx'), Argument('HandleObject', 'global'),
Argument('HandleObject', 'receiver'),
Argument('MutableHandleObject', 'rval')]
CGAbstractMethod.__init__(self, descriptor, 'CreateInterfaceObjects', 'void', args)
self.properties = properties
def definition_body(self):
protoChain = self.descriptor.prototypeChain
if len(protoChain) == 1:
self.unsafe = True
getParentProto = "parent_proto.ptr = JS_GetObjectPrototype(cx, global)"
else:
parentProtoName = self.descriptor.prototypeChain[-2]
getParentProto = ("%s::GetProtoObject(cx, global, receiver, parent_proto.handle_mut())" %
toBindingNamespace(parentProtoName))
getParentProto = ("let mut parent_proto = RootedObject::new(cx, ptr::null_mut());\n"
"%s;\n"
"assert!(!parent_proto.ptr.is_null());\n") % getParentProto
if self.descriptor.interface.isCallback():
protoClass = "None"
else:
protoClass = "Some(&PrototypeClass)"
if self.descriptor.interface.hasInterfaceObject():
if self.descriptor.interface.ctor():
constructHook = CONSTRUCT_HOOK_NAME
constructArgs = methodLength(self.descriptor.interface.ctor())
else:
constructHook = "throwing_constructor"
constructArgs = 0
constructor = 'Some((%s as NonNullJSNative, "%s", %d))' % (
constructHook, self.descriptor.interface.identifier.name,
constructArgs)
else:
constructor = 'None'
call = """\
do_create_interface_objects(cx, receiver, parent_proto.handle(),
%s, %s,
&named_constructors,
&sNativeProperties, rval);""" % (protoClass, constructor)
createArray = """\
let named_constructors: [(NonNullJSNative, &'static str, u32); %d] = [
""" % len(self.descriptor.interface.namedConstructors)
for ctor in self.descriptor.interface.namedConstructors:
constructHook = CONSTRUCT_HOOK_NAME + "_" + ctor.identifier.name
constructArgs = methodLength(ctor)
constructor = '(%s as NonNullJSNative, "%s", %d)' % (
constructHook, ctor.identifier.name, constructArgs)
createArray += constructor
createArray += ","
createArray += "];"
if self.descriptor.hasUnforgeableMembers:
# We want to use the same JSClass and prototype as the object we'll
# end up defining the unforgeable properties on in the end, so that
# we can use JS_InitializePropertiesFromCompatibleNativeObject to do
# a fast copy. In the case of proxies that's null, because the
# expando object is a vanilla object, but in the case of other DOM
# objects it's whatever our class is.
#
# Also, for a global we can't use the global's class; just use
# a null class pointer and when we do the copy off the holder we'll take a slower
# path. This also means that we don't need to worry about matching
# the prototype.
if self.descriptor.proxy or self.descriptor.isGlobal():
holderClass = "ptr::null()"
holderProto = "ptr::null_mut()"
else:
holderClass = "&Class.base as *const js::jsapi::Class as *const JSClass"
holderProto = "rval.get()"
# JS_NewObjectWithoutMetadata() is unsafe.
self.unsafe = True
createUnforgeableHolder = CGGeneric("""
let mut unforgeable_holder = RootedObject::new(cx, ptr::null_mut());
{
let holder_class = %(holderClass)s;
let holder_proto = RootedObject::new(cx, %(holderProto)s);
unforgeable_holder.handle_mut().set(
JS_NewObjectWithoutMetadata(cx, holder_class, holder_proto.handle()));
assert!(!unforgeable_holder.ptr.is_null());
}""" % {'holderClass': holderClass, 'holderProto': holderProto})
defineUnforgeables = InitUnforgeablePropertiesOnHolder(self.descriptor,
self.properties)
createUnforgeableHolder = CGList(
[createUnforgeableHolder, defineUnforgeables], "\n")
installUnforgeableHolder = CGGeneric("""\
JS_SetReservedSlot(rval.get(), DOM_PROTO_UNFORGEABLE_HOLDER_SLOT,
ObjectValue(&*unforgeable_holder.ptr))""")
unforgeableHolderSetup = CGList(
[createUnforgeableHolder, installUnforgeableHolder], "\n")
else:
unforgeableHolderSetup = None
return CGList([
CGGeneric(getParentProto),
CGGeneric(createArray),
CGGeneric(call % self.properties.variableNames()),
unforgeableHolderSetup,
], "\n")
class CGGetPerInterfaceObject(CGAbstractMethod):
"""
A method for getting a per-interface object (a prototype object or interface
constructor object).
"""
def __init__(self, descriptor, name, idPrefix="", pub=False):
args = [Argument('*mut JSContext', 'cx'), Argument('HandleObject', 'global'),
Argument('HandleObject', 'receiver'),
Argument('MutableHandleObject', 'rval')]
CGAbstractMethod.__init__(self, descriptor, name,
'void', args, pub=pub, unsafe=True)
self.id = idPrefix + "ID::" + self.descriptor.name
def definition_body(self):
return CGGeneric("""
/* global and receiver are usually the same, but they can be different
too. For example a sandbox often has an xray wrapper for a window as the
prototype of the sandbox's global. In that case receiver is the xray
wrapper and global is the sandbox's global.
*/
assert!(((*JS_GetClass(global.get())).flags & JSCLASS_DOM_GLOBAL) != 0);
/* Check to see whether the interface objects are already installed */
let proto_or_iface_array = get_proto_or_iface_array(global.get());
rval.set((*proto_or_iface_array)[%s as usize]);
if !rval.get().is_null() {
return;
}
CreateInterfaceObjects(cx, global, receiver, rval);
assert!(!rval.get().is_null());
(*proto_or_iface_array)[%s as usize] = rval.get();
if <*mut JSObject>::needs_post_barrier(rval.get()) {
<*mut JSObject>::post_barrier((*proto_or_iface_array).as_mut_ptr().offset(%s as isize))
}
""" % (self.id, self.id, self.id))
class CGGetProtoObjectMethod(CGGetPerInterfaceObject):
"""
A method for getting the interface prototype object.
"""
def __init__(self, descriptor):
CGGetPerInterfaceObject.__init__(self, descriptor, "GetProtoObject",
"PrototypeList::", pub=True)
def definition_body(self):
return CGList([
CGGeneric("""\
/* Get the interface prototype object for this class. This will create the
object as needed. */"""),
CGGetPerInterfaceObject.definition_body(self),
])
class CGGetConstructorObjectMethod(CGGetPerInterfaceObject):
"""
A method for getting the interface constructor object.
"""
def __init__(self, descriptor):
CGGetPerInterfaceObject.__init__(self, descriptor, "GetConstructorObject",
"constructors::")
def definition_body(self):
return CGList([
CGGeneric("""\
/* Get the interface object for this class. This will create the object as
needed. */"""),
CGGetPerInterfaceObject.definition_body(self),
])
class CGDefineProxyHandler(CGAbstractMethod):
"""
A method to create and cache the proxy trap for a given interface.
"""
def __init__(self, descriptor):
assert descriptor.proxy
CGAbstractMethod.__init__(self, descriptor, 'DefineProxyHandler',
'*const libc::c_void', [],
pub=True, unsafe=True)
def define(self):
return CGAbstractMethod.define(self)
def definition_body(self):
customDefineProperty = 'proxyhandler::define_property'
if self.descriptor.operations['IndexedSetter'] or self.descriptor.operations['NamedSetter']:
customDefineProperty = 'defineProperty'
customDelete = 'proxyhandler::delete'
if self.descriptor.operations['NamedDeleter']:
customDelete = 'delete'
body = """\
let traps = ProxyTraps {
enter: None,
getOwnPropertyDescriptor: Some(getOwnPropertyDescriptor),
defineProperty: Some(%s),
ownPropertyKeys: Some(own_property_keys),
delete_: Some(%s),
enumerate: None,
preventExtensions: Some(proxyhandler::prevent_extensions),
isExtensible: Some(proxyhandler::is_extensible),
has: None,
get: Some(get),
set: None,
call: None,
construct: None,
getPropertyDescriptor: Some(get_property_descriptor),
hasOwn: Some(hasOwn),
getOwnEnumerablePropertyKeys: None,
nativeCall: None,
hasInstance: None,
objectClassIs: None,
className: Some(className),
fun_toString: None,
boxedValue_unbox: None,
defaultValue: None,
trace: Some(%s),
finalize: Some(%s),
objectMoved: None,
isCallable: None,
isConstructor: None,
};
CreateProxyHandler(&traps, &Class as *const _ as *const _)\
""" % (customDefineProperty, customDelete, TRACE_HOOK_NAME, FINALIZE_HOOK_NAME)
return CGGeneric(body)
class CGDefineDOMInterfaceMethod(CGAbstractMethod):
"""
A method for resolve hooks to try to lazily define the interface object for
a given interface.
"""
def __init__(self, descriptor):
assert descriptor.interface.hasInterfaceObject()
args = [
Argument('*mut JSContext', 'cx'),
Argument('HandleObject', 'global'),
]
CGAbstractMethod.__init__(self, descriptor, 'DefineDOMInterface', 'void', args, pub=True)
def define(self):
return CGAbstractMethod.define(self)
def definition_body(self):
if self.descriptor.interface.isCallback():
code = """\
let mut obj = RootedObject::new(cx, ptr::null_mut());
CreateInterfaceObjects(cx, global, global, obj.handle_mut());
"""
else:
code = """\
let mut proto = RootedObject::new(cx, ptr::null_mut());
GetProtoObject(cx, global, global, proto.handle_mut());
assert!(!proto.ptr.is_null());
"""
return CGGeneric("assert!(!global.get().is_null());\n" + code)
def needCx(returnType, arguments, considerTypes):
return (considerTypes and
(typeNeedsCx(returnType, True) or
any(typeNeedsCx(a.type) for a in arguments)))
class CGCallGenerator(CGThing):
"""
A class to generate an actual call to a native Rust object. Assumes that the
object is stored in a variable whose name is given by the |object| argument.
errorResult should be a string for the value to return in case of an
exception from the native code, or None if no error reporting is needed.
"""
def __init__(self, errorResult, arguments, argsPre, returnType,
extendedAttributes, descriptorProvider, nativeMethodName,
static, object="this"):
CGThing.__init__(self)
assert errorResult is None or isinstance(errorResult, str)
isFallible = errorResult is not None
result = getRetvalDeclarationForType(returnType, descriptorProvider)
if isFallible:
result = CGWrapper(result, pre="Result<", post=", Error>")
args = CGList([CGGeneric(arg) for arg in argsPre], ", ")
for (a, name) in arguments:
# XXXjdm Perhaps we should pass all nontrivial types by borrowed pointer
if a.type.isDictionary():
name = "&" + name
args.append(CGGeneric(name))
needsCx = needCx(returnType, (a for (a, _) in arguments), True)
if "cx" not in argsPre and needsCx:
args.prepend(CGGeneric("cx"))
# Build up our actual call
self.cgRoot = CGList([], "\n")
call = CGGeneric(nativeMethodName)
if static:
call = CGWrapper(call, pre="%s::" % descriptorProvider.interface.identifier.name)
else:
call = CGWrapper(call, pre="%s." % object)
call = CGList([call, CGWrapper(args, pre="(", post=")")])
self.cgRoot.append(CGList([
CGGeneric("let result: "),
result,
CGGeneric(" = "),
call,
CGGeneric(";"),
]))
if isFallible:
if static:
glob = ""
else:
glob = " let global = global_root_from_reflector(this);\n"
self.cgRoot.append(CGGeneric(
"let result = match result {\n"
" Ok(result) => result,\n"
" Err(e) => {\n"
"%s"
" throw_dom_exception(cx, global.r(), e);\n"
" return%s;\n"
" },\n"
"};" % (glob, errorResult)))
def define(self):
return self.cgRoot.define()
class CGPerSignatureCall(CGThing):
"""
This class handles the guts of generating code for a particular
call signature. A call signature consists of four things:
1) A return type, which can be None to indicate that there is no
actual return value (e.g. this is an attribute setter) or an
IDLType if there's an IDL type involved (including |void|).
2) An argument list, which is allowed to be empty.
3) A name of a native method to call.
4) Whether or not this method is static.
We also need to know whether this is a method or a getter/setter
to do error reporting correctly.
The idlNode parameter can be either a method or an attr. We can query
|idlNode.identifier| in both cases, so we can be agnostic between the two.
"""
# XXXbz For now each entry in the argument list is either an
# IDLArgument or a FakeArgument, but longer-term we may want to
# have ways of flagging things like JSContext* or optional_argc in
# there.
def __init__(self, returnType, argsPre, arguments, nativeMethodName, static,
descriptor, idlNode, argConversionStartsAt=0,
getter=False, setter=False):
CGThing.__init__(self)
self.returnType = returnType
self.descriptor = descriptor
self.idlNode = idlNode
self.extendedAttributes = descriptor.getExtendedAttributes(idlNode,
getter=getter,
setter=setter)
self.argsPre = argsPre
self.arguments = arguments
self.argCount = len(arguments)
cgThings = []
cgThings.extend([CGArgumentConverter(arguments[i], i, self.getArgs(),
self.getArgc(), self.descriptor,
invalidEnumValueFatal=not setter) for
i in range(argConversionStartsAt, self.argCount)])
errorResult = None
if self.isFallible():
errorResult = " false"
cgThings.append(CGCallGenerator(
errorResult,
self.getArguments(), self.argsPre, returnType,
self.extendedAttributes, descriptor, nativeMethodName,
static))
self.cgRoot = CGList(cgThings, "\n")
def getArgs(self):
return "args" if self.argCount > 0 else ""
def getArgc(self):
return "argc"
def getArguments(self):
def process(arg, i):
argVal = "arg" + str(i)
if arg.type.isGeckoInterface() and not arg.type.unroll().inner.isCallback():
argVal += ".r()"
return argVal
return [(a, process(a, i)) for (i, a) in enumerate(self.arguments)]
def isFallible(self):
return 'infallible' not in self.extendedAttributes
def wrap_return_value(self):
return wrapForType('args.rval()')
def define(self):
return (self.cgRoot.define() + "\n" + self.wrap_return_value())
class CGSwitch(CGList):
"""
A class to generate code for a switch statement.
Takes three constructor arguments: an expression, a list of cases,
and an optional default.
Each case is a CGCase. The default is a CGThing for the body of
the default case, if any.
"""
def __init__(self, expression, cases, default=None):
CGList.__init__(self, [CGIndenter(c) for c in cases], "\n")
self.prepend(CGWrapper(CGGeneric(expression),
pre="match ", post=" {"))
if default is not None:
self.append(
CGIndenter(
CGWrapper(
CGIndenter(default),
pre="_ => {\n",
post="\n}"
)
)
)
self.append(CGGeneric("}"))
class CGCase(CGList):
"""
A class to generate code for a case statement.
Takes three constructor arguments: an expression, a CGThing for
the body (allowed to be None if there is no body), and an optional
argument (defaulting to False) for whether to fall through.
"""
def __init__(self, expression, body, fallThrough=False):
CGList.__init__(self, [], "\n")
self.append(CGWrapper(CGGeneric(expression), post=" => {"))
bodyList = CGList([body], "\n")
if fallThrough:
raise TypeError("fall through required but unsupported")
# bodyList.append(CGGeneric('panic!("fall through unsupported"); /* Fall through */'))
self.append(CGIndenter(bodyList))
self.append(CGGeneric("}"))
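# Illustrative sketch (hypothetical inputs, shown only to make the shape concrete):
# CGSwitch and CGCase cooperate to emit a Rust `match`, so something like
#
#     CGSwitch("argc", [CGCase("0", CGGeneric("do_zero()"))],
#              default=CGGeneric("do_other()"))
#
# defines, modulo the exact indentation applied by CGIndenter, roughly:
#
#     match argc {
#         0 => {
#             do_zero()
#         }
#         _ => {
#             do_other()
#         }
#     }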
class CGGetterCall(CGPerSignatureCall):
"""
A class to generate a native object getter call for a particular IDL
getter.
"""
def __init__(self, argsPre, returnType, nativeMethodName, descriptor, attr):
CGPerSignatureCall.__init__(self, returnType, argsPre, [],
nativeMethodName, attr.isStatic(), descriptor,
attr, getter=True)
class FakeArgument():
"""
A class that quacks like an IDLArgument. This is used to make
setters look like method calls or for special operations.
"""
def __init__(self, type, interfaceMember, allowTreatNonObjectAsNull=False):
self.type = type
self.optional = False
self.variadic = False
self.defaultValue = None
self._allowTreatNonObjectAsNull = allowTreatNonObjectAsNull
self.treatNullAs = interfaceMember.treatNullAs
self.enforceRange = False
self.clamp = False
def allowTreatNonCallableAsNull(self):
return self._allowTreatNonObjectAsNull
class CGSetterCall(CGPerSignatureCall):
"""
A class to generate a native object setter call for a particular IDL
setter.
"""
def __init__(self, argsPre, argType, nativeMethodName, descriptor, attr):
CGPerSignatureCall.__init__(self, None, argsPre,
[FakeArgument(argType, attr, allowTreatNonObjectAsNull=True)],
nativeMethodName, attr.isStatic(), descriptor, attr,
setter=True)
def wrap_return_value(self):
# We have no return value
return "\nreturn true;"
def getArgc(self):
return "1"
class CGAbstractStaticBindingMethod(CGAbstractMethod):
"""
Common class to generate the JSNatives for all our static methods, getters
and setters. This will generate the function declaration and unwrap the
global object. Subclasses are expected to override the generate_code
function to do the rest of the work. This function should return a
CGThing which is already properly indented.
"""
def __init__(self, descriptor, name):
args = [
Argument('*mut JSContext', 'cx'),
Argument('libc::c_uint', 'argc'),
Argument('*mut JSVal', 'vp'),
]
CGAbstractMethod.__init__(self, descriptor, name, "bool", args, extern=True)
def definition_body(self):
preamble = CGGeneric("""\
let global = global_root_from_object(JS_CALLEE(cx, vp).to_object());
""")
return CGList([preamble, self.generate_code()])
def generate_code(self):
raise NotImplementedError # Override me!
class CGSpecializedMethod(CGAbstractExternMethod):
"""
A class for generating the Rust code for a specialized method that the JIT
can call with lower overhead.
"""
def __init__(self, descriptor, method):
self.method = method
name = method.identifier.name
args = [Argument('*mut JSContext', 'cx'), Argument('HandleObject', '_obj'),
Argument('*const %s' % descriptor.concreteType, 'this'),
Argument('*const JSJitMethodCallArgs', 'args')]
CGAbstractExternMethod.__init__(self, descriptor, name, 'bool', args)
def definition_body(self):
nativeName = CGSpecializedMethod.makeNativeName(self.descriptor,
self.method)
return CGWrapper(CGMethodCall([], nativeName, self.method.isStatic(),
self.descriptor, self.method),
pre="let this = &*this;\n"
"let args = &*args;\n"
"let argc = args._base.argc_;\n")
@staticmethod
def makeNativeName(descriptor, method):
name = method.identifier.name
nativeName = descriptor.binaryNameFor(name)
if nativeName == name:
nativeName = descriptor.internalNameFor(name)
return MakeNativeName(nativeName)
class CGStaticMethod(CGAbstractStaticBindingMethod):
"""
A class for generating the Rust code for an IDL static method.
"""
def __init__(self, descriptor, method):
self.method = method
name = method.identifier.name
CGAbstractStaticBindingMethod.__init__(self, descriptor, name)
def generate_code(self):
nativeName = CGSpecializedMethod.makeNativeName(self.descriptor,
self.method)
setupArgs = CGGeneric("let args = CallArgs::from_vp(vp, argc);\n")
call = CGMethodCall(["global.r()"], nativeName, True, self.descriptor, self.method)
return CGList([setupArgs, call])
class CGSpecializedGetter(CGAbstractExternMethod):
"""
A class for generating the code for a specialized attribute getter
that the JIT can call with lower overhead.
"""
def __init__(self, descriptor, attr):
self.attr = attr
name = 'get_' + descriptor.internalNameFor(attr.identifier.name)
args = [Argument('*mut JSContext', 'cx'),
Argument('HandleObject', '_obj'),
Argument('*const %s' % descriptor.concreteType, 'this'),
Argument('JSJitGetterCallArgs', 'args')]
CGAbstractExternMethod.__init__(self, descriptor, name, "bool", args)
def definition_body(self):
nativeName = CGSpecializedGetter.makeNativeName(self.descriptor,
self.attr)
return CGWrapper(CGGetterCall([], self.attr.type, nativeName,
self.descriptor, self.attr),
pre="let this = &*this;\n")
@staticmethod
def makeNativeName(descriptor, attr):
name = attr.identifier.name
nativeName = descriptor.binaryNameFor(name)
if nativeName == name:
nativeName = descriptor.internalNameFor(name)
nativeName = MakeNativeName(nativeName)
infallible = ('infallible' in
descriptor.getExtendedAttributes(attr, getter=True))
if attr.type.nullable() or not infallible:
return "Get" + nativeName
return nativeName
class CGStaticGetter(CGAbstractStaticBindingMethod):
"""
A class for generating the Rust code for an IDL static attribute getter.
"""
def __init__(self, descriptor, attr):
self.attr = attr
name = 'get_' + attr.identifier.name
CGAbstractStaticBindingMethod.__init__(self, descriptor, name)
def generate_code(self):
nativeName = CGSpecializedGetter.makeNativeName(self.descriptor,
self.attr)
setupArgs = CGGeneric("let args = CallArgs::from_vp(vp, argc);\n")
call = CGGetterCall(["global.r()"], self.attr.type, nativeName, self.descriptor,
self.attr)
return CGList([setupArgs, call])
class CGSpecializedSetter(CGAbstractExternMethod):
"""
A class for generating the code for a specialized attribute setter
that the JIT can call with lower overhead.
"""
def __init__(self, descriptor, attr):
self.attr = attr
name = 'set_' + descriptor.internalNameFor(attr.identifier.name)
args = [Argument('*mut JSContext', 'cx'),
Argument('HandleObject', 'obj'),
Argument('*const %s' % descriptor.concreteType, 'this'),
Argument('JSJitSetterCallArgs', 'args')]
CGAbstractExternMethod.__init__(self, descriptor, name, "bool", args)
def definition_body(self):
nativeName = CGSpecializedSetter.makeNativeName(self.descriptor,
self.attr)
return CGWrapper(CGSetterCall([], self.attr.type, nativeName,
self.descriptor, self.attr),
pre="let this = &*this;\n")
@staticmethod
def makeNativeName(descriptor, attr):
name = attr.identifier.name
nativeName = descriptor.binaryNameFor(name)
if nativeName == name:
nativeName = descriptor.internalNameFor(name)
return "Set" + MakeNativeName(nativeName)
class CGStaticSetter(CGAbstractStaticBindingMethod):
"""
A class for generating the Rust code for an IDL static attribute setter.
"""
def __init__(self, descriptor, attr):
self.attr = attr
name = 'set_' + attr.identifier.name
CGAbstractStaticBindingMethod.__init__(self, descriptor, name)
def generate_code(self):
nativeName = CGSpecializedSetter.makeNativeName(self.descriptor,
self.attr)
checkForArg = CGGeneric(
"let args = CallArgs::from_vp(vp, argc);\n"
"if argc == 0 {\n"
" throw_type_error(cx, \"Not enough arguments to %s setter.\");\n"
" return false;\n"
"}" % self.attr.identifier.name)
call = CGSetterCall(["global.r()"], self.attr.type, nativeName, self.descriptor,
self.attr)
return CGList([checkForArg, call])
class CGSpecializedForwardingSetter(CGSpecializedSetter):
"""
A class for generating the code for an IDL attribute forwarding setter.
"""
def __init__(self, descriptor, attr):
CGSpecializedSetter.__init__(self, descriptor, attr)
def definition_body(self):
attrName = self.attr.identifier.name
forwardToAttrName = self.attr.getExtendedAttribute("PutForwards")[0]
# JS_GetProperty and JS_SetProperty can only deal with ASCII
assert all(ord(c) < 128 for c in attrName)
assert all(ord(c) < 128 for c in forwardToAttrName)
return CGGeneric("""\
let mut v = RootedValue::new(cx, UndefinedValue());
if !JS_GetProperty(cx, obj, %s as *const u8 as *const libc::c_char, v.handle_mut()) {
return false;
}
if !v.ptr.is_object() {
throw_type_error(cx, "Value.%s is not an object.");
return false;
}
let target_obj = RootedObject::new(cx, v.ptr.to_object());
JS_SetProperty(cx, target_obj.handle(), %s as *const u8 as *const libc::c_char, args.get(0))
""" % (str_to_const_array(attrName), attrName, str_to_const_array(forwardToAttrName)))
class CGMemberJITInfo(CGThing):
"""
A class for generating the JITInfo for a property that points to
our specialized getter and setter.
"""
def __init__(self, descriptor, member):
self.member = member
self.descriptor = descriptor
def defineJitInfo(self, infoName, opName, opType, infallible, movable,
aliasSet, alwaysInSlot, lazilyInSlot, slotIndex,
returnTypes, args):
"""
aliasSet is a JSJitInfo::AliasSet value, without the "JSJitInfo::" bit.
args is None if we don't want to output argTypes for some
reason (e.g. we have overloads or we're not a method) and
otherwise an iterable of the arguments for this method.
"""
assert not movable or aliasSet != "AliasEverything" # Can't move write-aliasing things
assert not alwaysInSlot or movable # Things always in slots had better be movable
def jitInfoInitializer(isTypedMethod):
initializer = fill(
"""
JSJitInfo {
call: ${opName} as *const ::libc::c_void,
protoID: PrototypeList::ID::${name} as u16,
depth: ${depth},
_bitfield_1:
JSJitInfo::new_bitfield_1(
OpType::${opType} as u8,
AliasSet::${aliasSet} as u8,
JSValueType::${returnType} as u8,
${isInfallible},
${isMovable},
${isAlwaysInSlot},
${isLazilyCachedInSlot},
${isTypedMethod},
${slotIndex} as u16,
)
}
""",
opName=opName,
name=self.descriptor.name,
depth=self.descriptor.interface.inheritanceDepth(),
opType=opType,
aliasSet=aliasSet,
returnType=reduce(CGMemberJITInfo.getSingleReturnType, returnTypes,
""),
isInfallible=toStringBool(infallible),
isMovable=toStringBool(movable),
isAlwaysInSlot=toStringBool(alwaysInSlot),
isLazilyCachedInSlot=toStringBool(lazilyInSlot),
isTypedMethod=toStringBool(isTypedMethod),
slotIndex=slotIndex)
return initializer.rstrip()
if args is not None:
argTypes = "%s_argTypes" % infoName
args = [CGMemberJITInfo.getJSArgType(arg.type) for arg in args]
args.append("ArgType::ArgTypeListEnd as i32")
argTypesDecl = (
"const %s: [i32; %d] = [ %s ];\n" %
(argTypes, len(args), ", ".join(args)))
return fill(
"""
$*{argTypesDecl}
const ${infoName}: JSTypedMethodJitInfo = JSTypedMethodJitInfo {
base: ${jitInfo},
argTypes: &${argTypes} as *const _ as *const ArgType,
};
""",
argTypesDecl=argTypesDecl,
infoName=infoName,
jitInfo=indent(jitInfoInitializer(True)),
argTypes=argTypes)
return ("\n"
"const %s: JSJitInfo = %s;\n"
% (infoName, jitInfoInitializer(False)))
def define(self):
if self.member.isAttr():
internalMemberName = self.descriptor.internalNameFor(self.member.identifier.name)
getterinfo = ("%s_getterinfo" % internalMemberName)
getter = ("get_%s" % internalMemberName)
getterinfal = "infallible" in self.descriptor.getExtendedAttributes(self.member, getter=True)
movable = self.mayBeMovable() and getterinfal
aliasSet = self.aliasSet()
isAlwaysInSlot = self.member.getExtendedAttribute("StoreInSlot")
if self.member.slotIndex is not None:
assert isAlwaysInSlot or self.member.getExtendedAttribute("Cached")
isLazilyCachedInSlot = not isAlwaysInSlot
slotIndex = memberReservedSlot(self.member) # noqa:FIXME: memberReservedSlot is not defined
# We'll statically assert that this is not too big in
# CGUpdateMemberSlotsMethod, in the case when
# isAlwaysInSlot is true.
else:
isLazilyCachedInSlot = False
slotIndex = "0"
result = self.defineJitInfo(getterinfo, getter, "Getter",
getterinfal, movable, aliasSet,
isAlwaysInSlot, isLazilyCachedInSlot,
slotIndex,
[self.member.type], None)
if (not self.member.readonly or self.member.getExtendedAttribute("PutForwards")):
setterinfo = ("%s_setterinfo" % internalMemberName)
setter = ("set_%s" % internalMemberName)
# Setters are always fallible, since they have to do a typed unwrap.
result += self.defineJitInfo(setterinfo, setter, "Setter",
False, False, "AliasEverything",
False, False, "0",
[BuiltinTypes[IDLBuiltinType.Types.void]],
None)
return result
if self.member.isMethod():
methodinfo = ("%s_methodinfo" % self.member.identifier.name)
method = ("%s" % self.member.identifier.name)
# Methods are treated as infallible if the native implementation is
# infallible, they have no arguments to unwrap, and their return type
# is infallible to wrap up for return.
sigs = self.member.signatures()
if len(sigs) != 1:
# Don't handle overloading. If there's more than one signature,
# one of them must take arguments.
methodInfal = False
args = None
movable = False
else:
sig = sigs[0]
# For methods that affect nothing, it's OK to set movable to our
# notion of infallible on the C++ side, without considering
# argument conversions, since argument conversions that can
# reliably throw would be effectful anyway and the jit doesn't
# move effectful things.
hasInfallibleImpl = "infallible" in self.descriptor.getExtendedAttributes(self.member)
movable = self.mayBeMovable() and hasInfallibleImpl
# XXXbz can we move the smarts about fallibility due to arg
# conversions into the JIT, using our new args stuff?
if (len(sig[1]) != 0):
# We have arguments or our return-value boxing can fail
methodInfal = False
else:
methodInfal = hasInfallibleImpl
# For now, only bother to output args if we're side-effect-free.
if self.member.affects == "Nothing":
args = sig[1]
else:
args = None
aliasSet = self.aliasSet()
result = self.defineJitInfo(methodinfo, method, "Method",
methodInfal, movable, aliasSet,
False, False, "0",
[s[0] for s in sigs], args)
return result
raise TypeError("Illegal member type to CGPropertyJITInfo")
def mayBeMovable(self):
"""
Returns whether this attribute or method may be movable, just
based on Affects/DependsOn annotations.
"""
affects = self.member.affects
dependsOn = self.member.dependsOn
assert affects in IDLInterfaceMember.AffectsValues
assert dependsOn in IDLInterfaceMember.DependsOnValues
# Things that are DependsOn=DeviceState are not movable, because we
# don't want them coalesced with each other or loop-hoisted, since
# their return value can change even if nothing is going on from our
# point of view.
return (affects == "Nothing" and
(dependsOn != "Everything" and dependsOn != "DeviceState"))
def aliasSet(self):
"""Returns the alias set to store in the jitinfo. This may not be the
effective alias set the JIT uses, depending on whether we have enough
information about our args to allow the JIT to prove that effectful
argument conversions won't happen.
"""
dependsOn = self.member.dependsOn
assert dependsOn in IDLInterfaceMember.DependsOnValues
if dependsOn == "Nothing" or dependsOn == "DeviceState":
assert self.member.affects == "Nothing"
return "AliasNone"
if dependsOn == "DOMState":
assert self.member.affects == "Nothing"
return "AliasDOMSets"
return "AliasEverything"
@staticmethod
def getJSReturnTypeTag(t):
if t.nullable():
# Sometimes it might return null, sometimes not
return "JSVAL_TYPE_UNKNOWN"
if t.isVoid():
# No return, every time
return "JSVAL_TYPE_UNDEFINED"
if t.isArray():
# No idea yet
assert False
if t.isSequence():
return "JSVAL_TYPE_OBJECT"
if t.isMozMap():
return "JSVAL_TYPE_OBJECT"
if t.isGeckoInterface():
return "JSVAL_TYPE_OBJECT"
if t.isString():
return "JSVAL_TYPE_STRING"
if t.isEnum():
return "JSVAL_TYPE_STRING"
if t.isCallback():
return "JSVAL_TYPE_OBJECT"
if t.isAny():
# The whole point is to return various stuff
return "JSVAL_TYPE_UNKNOWN"
if t.isObject():
return "JSVAL_TYPE_OBJECT"
if t.isSpiderMonkeyInterface():
return "JSVAL_TYPE_OBJECT"
if t.isUnion():
u = t.unroll()
if u.hasNullableType:
# Might be null or not
return "JSVAL_TYPE_UNKNOWN"
return reduce(CGMemberJITInfo.getSingleReturnType,
u.flatMemberTypes, "")
if t.isDictionary():
return "JSVAL_TYPE_OBJECT"
if t.isDate():
return "JSVAL_TYPE_OBJECT"
if not t.isPrimitive():
raise TypeError("No idea what type " + str(t) + " is.")
tag = t.tag()
if tag == IDLType.Tags.bool:
return "JSVAL_TYPE_BOOLEAN"
if tag in [IDLType.Tags.int8, IDLType.Tags.uint8,
IDLType.Tags.int16, IDLType.Tags.uint16,
IDLType.Tags.int32]:
return "JSVAL_TYPE_INT32"
if tag in [IDLType.Tags.int64, IDLType.Tags.uint64,
IDLType.Tags.unrestricted_float, IDLType.Tags.float,
IDLType.Tags.unrestricted_double, IDLType.Tags.double]:
# These all use JS_NumberValue, which can return int or double.
# But TI treats "double" as meaning "int or double", so we're
# good to return JSVAL_TYPE_DOUBLE here.
return "JSVAL_TYPE_DOUBLE"
if tag != IDLType.Tags.uint32:
raise TypeError("No idea what type " + str(t) + " is.")
# uint32 is sometimes int and sometimes double.
return "JSVAL_TYPE_DOUBLE"
@staticmethod
def getSingleReturnType(existingType, t):
type = CGMemberJITInfo.getJSReturnTypeTag(t)
if existingType == "":
# First element of the list; just return its type
return type
if type == existingType:
return existingType
if ((type == "JSVAL_TYPE_DOUBLE" and
existingType == "JSVAL_TYPE_INT32") or
(existingType == "JSVAL_TYPE_DOUBLE" and
type == "JSVAL_TYPE_INT32")):
# Promote INT32 to DOUBLE as needed
return "JSVAL_TYPE_DOUBLE"
# Different types
return "JSVAL_TYPE_UNKNOWN"
@staticmethod
def getJSArgType(t):
assert not t.isVoid()
if t.nullable():
# Sometimes it might return null, sometimes not
return "ArgType::Null as i32 | %s" % CGMemberJITInfo.getJSArgType(t.inner)
if t.isArray():
# No idea yet
assert False
if t.isSequence():
return "ArgType::Object as i32"
if t.isGeckoInterface():
return "ArgType::Object as i32"
if t.isString():
return "ArgType::String as i32"
if t.isEnum():
return "ArgType::String as i32"
if t.isCallback():
return "ArgType::Object as i32"
if t.isAny():
# The whole point is to return various stuff
return "ArgType::Any as i32"
if t.isObject():
return "ArgType::Object as i32"
if t.isSpiderMonkeyInterface():
return "ArgType::Object as i32"
if t.isUnion():
u = t.unroll()
type = "JSJitInfo::Null as i32" if u.hasNullableType else ""
return reduce(CGMemberJITInfo.getSingleArgType,
u.flatMemberTypes, type)
if t.isDictionary():
return "ArgType::Object as i32"
if t.isDate():
return "ArgType::Object as i32"
if not t.isPrimitive():
raise TypeError("No idea what type " + str(t) + " is.")
tag = t.tag()
if tag == IDLType.Tags.bool:
return "ArgType::Boolean as i32"
if tag in [IDLType.Tags.int8, IDLType.Tags.uint8,
IDLType.Tags.int16, IDLType.Tags.uint16,
IDLType.Tags.int32]:
return "ArgType::Integer as i32"
if tag in [IDLType.Tags.int64, IDLType.Tags.uint64,
IDLType.Tags.unrestricted_float, IDLType.Tags.float,
IDLType.Tags.unrestricted_double, IDLType.Tags.double]:
# These all use JS_NumberValue, which can return int or double.
# But TI treats "double" as meaning "int or double", so we're
# good to return ArgType::Double here.
return "ArgType::Double as i32"
if tag != IDLType.Tags.uint32:
raise TypeError("No idea what type " + str(t) + " is.")
# uint32 is sometimes int and sometimes double.
return "ArgType::Double as i32"
@staticmethod
def getSingleArgType(existingType, t):
type = CGMemberJITInfo.getJSArgType(t)
if existingType == "":
# First element of the list; just return its type
return type
if type == existingType:
return existingType
return "%s | %s" % (existingType, type)
def getEnumValueName(value):
# Some enum values can be empty strings. Others might have weird
# characters in them. Deal with the former by returning "_empty",
# deal with possible name collisions from that by throwing if the
# enum value is actually "_empty", and throw on any value
# containing non-ASCII chars for now. Replace all chars other than
# [0-9A-Za-z_] with '_'.
if re.match("[^\x20-\x7E]", value):
raise SyntaxError('Enum value "' + value + '" contains non-ASCII characters')
if re.match("^[0-9]", value):
raise SyntaxError('Enum value "' + value + '" starts with a digit')
value = re.sub(r'[^0-9A-Za-z_]', '_', value)
if re.match("^_[A-Z]|__", value):
raise SyntaxError('Enum value "' + value + '" is reserved by the C++ spec')
if value == "_empty":
raise SyntaxError('"_empty" is not an IDL enum value we support yet')
if value == "":
return "_empty"
return MakeNativeName(value)
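# Illustrative note on the mapping above (assuming MakeNativeName simply upper-cases
# the first character, which is not shown in this excerpt):
#
#     getEnumValueName("use-credentials")  ->  "Use_credentials"
#     getEnumValueName("")                 ->  "_empty"
#
# while values containing non-ASCII characters, values starting with a digit, and the
# literal string "_empty" raise SyntaxError.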
class CGEnum(CGThing):
def __init__(self, enum):
CGThing.__init__(self)
decl = """\
#[repr(usize)]
#[derive(JSTraceable, PartialEq, Copy, Clone, HeapSizeOf)]
pub enum %s {
%s
}
""" % (enum.identifier.name, ",\n ".join(map(getEnumValueName, enum.values())))
inner = """\
use dom::bindings::conversions::ToJSValConvertible;
use js::jsapi::{JSContext, MutableHandleValue};
use js::jsval::JSVal;
pub const strings: &'static [&'static str] = &[
%s,
];
impl ToJSValConvertible for super::%s {
unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
strings[*self as usize].to_jsval(cx, rval);
}
}
""" % (",\n ".join(['"%s"' % val for val in enum.values()]), enum.identifier.name)
self.cgRoot = CGList([
CGGeneric(decl),
CGNamespace.build([enum.identifier.name + "Values"],
CGIndenter(CGGeneric(inner)), public=True),
])
def define(self):
return self.cgRoot.define()
def convertConstIDLValueToRust(value):
tag = value.type.tag()
if tag in [IDLType.Tags.int8, IDLType.Tags.uint8,
IDLType.Tags.int16, IDLType.Tags.uint16,
IDLType.Tags.int32, IDLType.Tags.uint32,
IDLType.Tags.int64, IDLType.Tags.uint64,
IDLType.Tags.unrestricted_float, IDLType.Tags.float,
IDLType.Tags.unrestricted_double, IDLType.Tags.double]:
return str(value.value)
if tag == IDLType.Tags.bool:
return toStringBool(value.value)
raise TypeError("Const value of unhandled type: " + value.type)
class CGConstant(CGThing):
def __init__(self, constants):
CGThing.__init__(self)
self.constants = constants
def define(self):
def stringDecl(const):
name = const.identifier.name
value = convertConstIDLValueToRust(const.value)
return CGGeneric("pub const %s: %s = %s;\n" % (name, builtinNames[const.value.type.tag()], value))
return CGIndenter(CGList(stringDecl(m) for m in self.constants)).define()
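# Illustrative sketch (assuming builtinNames maps the uint16 tag to "u16"; the table
# itself is defined elsewhere in this file): for an IDL member like
# `const unsigned short DONE = 4;`, convertConstIDLValueToRust plus CGConstant emit
#
#     pub const DONE: u16 = 4;
#
# indented by CGIndenter to sit inside the generated interface module.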
def getUnionTypeTemplateVars(type, descriptorProvider):
# For dictionaries and sequences we need to pass None as the failureCode
# for getJSToNativeConversionInfo.
# Also, for dictionaries we would need to handle conversion of
# null/undefined to the dictionary correctly.
if type.isDictionary() or type.isSequence():
raise TypeError("Can't handle dictionaries or sequences in unions")
if type.isGeckoInterface():
name = type.inner.identifier.name
typeName = descriptorProvider.getDescriptor(name).returnType
elif type.isEnum():
name = type.inner.identifier.name
typeName = name
elif type.isArray() or type.isSequence():
name = str(type)
# XXXjdm dunno about typeName here
typeName = "/*" + type.name + "*/"
elif type.isDOMString():
name = type.name
typeName = "DOMString"
elif type.isUSVString():
name = type.name
typeName = "USVString"
elif type.isPrimitive():
name = type.name
typeName = builtinNames[type.tag()]
else:
name = type.name
typeName = "/*" + type.name + "*/"
info = getJSToNativeConversionInfo(
type, descriptorProvider, failureCode="return Ok(None);",
exceptionCode='return Err(());',
isDefinitelyObject=True)
template = info.template
assert not type.isObject()
jsConversion = string.Template(template).substitute({
"val": "value",
})
jsConversion = CGWrapper(CGGeneric(jsConversion), pre="Ok(Some(", post="))")
return {
"name": name,
"typeName": typeName,
"jsConversion": jsConversion,
}
class CGUnionStruct(CGThing):
def __init__(self, type, descriptorProvider):
assert not type.nullable()
assert not type.hasNullableType
CGThing.__init__(self)
self.type = type
self.descriptorProvider = descriptorProvider
def define(self):
templateVars = map(lambda t: getUnionTypeTemplateVars(t, self.descriptorProvider),
self.type.flatMemberTypes)
enumValues = [
" e%s(%s)," % (v["name"], v["typeName"]) for v in templateVars
]
enumConversions = [
" %s::e%s(ref inner) => inner.to_jsval(cx, rval),"
% (self.type, v["name"]) for v in templateVars
]
return ("""\
pub enum %s {
%s
}
impl ToJSValConvertible for %s {
unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
match *self {
%s
}
}
}
""") % (self.type, "\n".join(enumValues), self.type, "\n".join(enumConversions))
class CGUnionConversionStruct(CGThing):
def __init__(self, type, descriptorProvider):
assert not type.nullable()
assert not type.hasNullableType
CGThing.__init__(self)
self.type = type
self.descriptorProvider = descriptorProvider
def from_jsval(self):
memberTypes = self.type.flatMemberTypes
names = []
conversions = []
interfaceMemberTypes = filter(lambda t: t.isNonCallbackInterface(), memberTypes)
if len(interfaceMemberTypes) > 0:
def get_name(memberType):
if self.type.isGeckoInterface():
return memberType.inner.identifier.name
return memberType.name
def get_match(name):
return (
"match %s::TryConvertTo%s(cx, value) {\n"
" Err(_) => return Err(()),\n"
" Ok(Some(value)) => return Ok(%s::e%s(value)),\n"
" Ok(None) => (),\n"
"}\n") % (self.type, name, self.type, name)
typeNames = [get_name(memberType) for memberType in interfaceMemberTypes]
interfaceObject = CGList(CGGeneric(get_match(typeName)) for typeName in typeNames)
names.extend(typeNames)
else:
interfaceObject = None
arrayObjectMemberTypes = filter(lambda t: t.isArray() or t.isSequence(), memberTypes)
if len(arrayObjectMemberTypes) > 0:
|
# module pyparsing.py
#
# Copyright (c) 2003-2009 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#from __future__ import generators
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form "<salutation>, <addressee>!")::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word( alphas ) + "," + Word( alphas ) + "!"
hello = "Hello, World!"
print hello, "->", greet.parseString( hello )
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The parsed results returned from parseString() can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "1.5.2"
__versionTime__ = "17 February 2009 19:45"
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 'Upcase',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'getTokensEndLoc', 'hexnums',
'htmlComment', 'javaStyleComment', 'keepOriginalText', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor',
]
"""
Detect if we are running version 3.X and make appropriate changes
Robert A. Clark
"""
if sys.version_info[0] > 2:
_PY3K = True
_MAX_INT = sys.maxsize
basestring = str
else:
_PY3K = False
_MAX_INT = sys.maxint
if not _PY3K:
def _ustr(obj):
"""Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
then returns the unicode object as-is (the commented-out code below sketches alternative encodings).
"""
if isinstance(obj,unicode):
return obj
try:
# If this works, then _ustr(obj) has the same behaviour as str(obj), so
# it won't break any existing code.
return str(obj)
except UnicodeEncodeError:
# The Python docs (http://docs.python.org/ref/customization.html#l2h-182)
# state that "The return value must be a string object". However, does a
# unicode object (being a subclass of basestring) count as a "string
# object"?
# If so, then return a unicode object:
return unicode(obj)
# Else encode it... but how? There are many choices... :)
# Replace unprintables with escape codes?
#return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors')
# Replace unprintables with question marks?
#return unicode(obj).encode(sys.getdefaultencoding(), 'replace')
# ...
else:
_ustr = str
unichr = chr
if not _PY3K:
def _str2dict(strg):
return dict( [(c,0) for c in strg] )
else:
_str2dict = set
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ['&'+s+';' for s in "amp gt lt quot apos".split()]
for from_,to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
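# Illustrative sketch (not part of the original module): given the mapping
# above, markup-significant characters come back as named entities, e.g.
#~ _xml_escape('a < b & "c"')   # -> 'a &lt; b &amp; &quot;c&quot;'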
class _Constants(object):
pass
if not _PY3K:
alphas = string.lowercase + string.uppercase
else:
alphas = string.ascii_lowercase + string.ascii_uppercase
nums = string.digits
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = chr(92)
printables = "".join( [ c for c in string.printable if c not in string.whitespace ] )
class ParseBaseException(Exception):
"""base exception class for all parsing runtime exceptions"""
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, pstr, loc=0, msg=None, elem=None ):
self.loc = loc
if msg is None:
self.msg = pstr
self.pstr = ""
else:
self.msg = msg
self.pstr = pstr
self.parserElement = elem
def __getattr__( self, aname ):
"""supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
if( aname == "lineno" ):
return lineno( self.loc, self.pstr )
elif( aname in ("col", "column") ):
return col( self.loc, self.pstr )
elif( aname == "line" ):
return line( self.loc, self.pstr )
else:
raise AttributeError(aname)
def __str__( self ):
return "%s (at char %d), (line:%d, col:%d)" % \
( self.msg, self.loc, self.lineno, self.column )
def __repr__( self ):
return _ustr(self)
def markInputline( self, markerString = ">!<" ):
"""Extracts the exception line from the input string, and marks
the location of the exception with a special symbol.
"""
line_str = self.line
line_column = self.column - 1
if markerString:
line_str = "".join( [line_str[:line_column],
markerString, line_str[line_column:]])
return line_str.strip()
def __dir__(self):
return "loc msg pstr parserElement lineno col line " \
"markInputLine __str__ __repr__".split()
class ParseException(ParseBaseException):
"""exception thrown when parse expressions don't match class;
supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
pass
class ParseFatalException(ParseBaseException):
"""user-throwable exception thrown when inconsistent parse content
is found; stops all parsing immediately"""
pass
class ParseSyntaxException(ParseFatalException):
"""just like ParseFatalException, but thrown internally when an
ErrorStop indicates that parsing is to stop immediately because
an unbacktrackable syntax error has been found"""
def __init__(self, pe):
super(ParseSyntaxException, self).__init__(
pe.pstr, pe.loc, pe.msg, pe.parserElement)
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
"""exception thrown by validate() if the grammar could be improperly recursive"""
def __init__( self, parseElementList ):
self.parseElementTrace = parseElementList
def __str__( self ):
return "RecursiveGrammarException: %s" % self.parseElementTrace
class _ParseResultsWithOffset(object):
def __init__(self,p1,p2):
self.tup = (p1,p2)
def __getitem__(self,i):
return self.tup[i]
def __repr__(self):
return repr(self.tup)
def setOffset(self,i):
self.tup = (self.tup[0],i)
class ParseResults(object):
"""Structured parse results, to provide multiple means of access to the parsed data:
- as a list (len(results))
- by list index (results[0], results[1], etc.)
- by attribute (results.<resultsName>)
"""
__slots__ = ( "__toklist", "__tokdict", "__doinit", "__name", "__parent", "__accumNames", "__weakref__" )
def __new__(cls, toklist, name=None, asList=True, modal=True ):
if isinstance(toklist, cls):
return toklist
retobj = object.__new__(cls)
retobj.__doinit = True
return retobj
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, toklist, name=None, asList=True, modal=True ):
if self.__doinit:
self.__doinit = False
self.__name = None
self.__parent = None
self.__accumNames = {}
if isinstance(toklist, list):
self.__toklist = toklist[:]
else:
self.__toklist = [toklist]
self.__tokdict = dict()
if name:
if not modal:
self.__accumNames[name] = 0
if isinstance(name,int):
name = _ustr(name) # will always return a str, but use _ustr for consistency
self.__name = name
if not toklist in (None,'',[]):
if isinstance(toklist,basestring):
toklist = [ toklist ]
if asList:
if isinstance(toklist,ParseResults):
self[name] = _ParseResultsWithOffset(toklist.copy(),0)
else:
self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
self[name].__name = name
else:
try:
self[name] = toklist[0]
except (KeyError,TypeError,IndexError):
self[name] = toklist
def __getitem__( self, i ):
if isinstance( i, (int,slice) ):
return self.__toklist[i]
else:
if i not in self.__accumNames:
return self.__tokdict[i][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[i] ])
def __setitem__( self, k, v ):
if isinstance(v,_ParseResultsWithOffset):
self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
sub = v[0]
elif isinstance(k,int):
self.__toklist[k] = v
sub = v
else:
self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
sub = v
if isinstance(sub,ParseResults):
sub.__parent = wkref(self)
def __delitem__( self, i ):
if isinstance(i,(int,slice)):
mylen = len( self.__toklist )
del self.__toklist[i]
# convert int to slice
if isinstance(i, int):
if i < 0:
i += mylen
i = slice(i, i+1)
# get removed indices
removed = list(range(*i.indices(mylen)))
removed.reverse()
# fixup indices in token dictionary
for name in self.__tokdict:
occurrences = self.__tokdict[name]
for j in removed:
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
else:
del self.__tokdict[i]
def __contains__( self, k ):
return k in self.__tokdict
def __len__( self ): return len( self.__toklist )
def __bool__(self): return len( self.__toklist ) > 0
__nonzero__ = __bool__
def __iter__( self ): return iter( self.__toklist )
def __reversed__( self ): return iter( reversed(self.__toklist) )
def keys( self ):
"""Returns all named result keys."""
return self.__tokdict.keys()
def pop( self, index=-1 ):
"""Removes and returns item at specified index (default=last).
Will work with either numeric indices or dict-key indices.
ret = self[index]
del self[index]
return ret
def get(self, key, defaultValue=None):
"""Returns named result matching the given key, or if there is no
such name, then returns the given defaultValue or None if no
defaultValue is specified."""
if key in self:
return self[key]
else:
return defaultValue
def insert( self, index, insStr ):
self.__toklist.insert(index, insStr)
# fixup indices in token dictionary
for name in self.__tokdict:
occurrences = self.__tokdict[name]
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
def items( self ):
"""Returns all named result keys and values as a list of tuples."""
return [(k,self[k]) for k in self.__tokdict]
def values( self ):
"""Returns all named result values."""
return [ v[-1][0] for v in self.__tokdict.values() ]
def __getattr__( self, name ):
if name not in self.__slots__:
if name in self.__tokdict:
if name not in self.__accumNames:
return self.__tokdict[name][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[name] ])
else:
return ""
return None
def __add__( self, other ):
ret = self.copy()
ret += other
return ret
def __iadd__( self, other ):
if other.__tokdict:
offset = len(self.__toklist)
addoffset = ( lambda a: (a<0 and offset) or (a+offset) )
otheritems = other.__tokdict.items()
otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
for (k,vlist) in otheritems for v in vlist]
for k,v in otherdictitems:
self[k] = v
if isinstance(v[0],ParseResults):
v[0].__parent = wkref(self)
self.__toklist += other.__toklist
self.__accumNames.update( other.__accumNames )
del other
return self
def __repr__( self ):
return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
def __str__( self ):
out = "["
sep = ""
for i in self.__toklist:
if isinstance(i, ParseResults):
out += sep + _ustr(i)
else:
out += sep + repr(i)
sep = ", "
out += "]"
return out
def _asStringList( self, sep='' ):
out = []
for item in self.__toklist:
if out and sep:
out.append(sep)
if isinstance( item, ParseResults ):
out += item._asStringList()
else:
out.append( _ustr(item) )
return out
def asList( self ):
"""Returns the parse results as a nested list of matching tokens, all converted to strings."""
out = []
for res in self.__toklist:
if isinstance(res,ParseResults):
out.append( res.asList() )
else:
out.append( res )
return out
def asDict( self ):
"""Returns the named parse results as dictionary."""
return dict( self.items() )
def copy( self ):
"""Returns a new copy of a ParseResults object."""
ret = ParseResults( self.__toklist )
ret.__tokdict = self.__tokdict.copy()
ret.__parent = self.__parent
ret.__accumNames.update( self.__accumNames )
ret.__name = self.__name
return ret
def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
"""Returns the parse results as XML. Tags are created for tokens and lists that have defined results names."""
nl = "\n"
out = []
namedItems = dict( [ (v[1],k) for (k,vlist) in self.__tokdict.items()
for v in vlist ] )
nextLevelIndent = indent + " "
# collapse out indents if formatting is not desired
if not formatted:
indent = ""
nextLevelIndent = ""
nl = ""
selfTag = None
if doctag is not None:
selfTag = doctag
else:
if self.__name:
selfTag = self.__name
if not selfTag:
if namedItemsOnly:
return ""
else:
selfTag = "ITEM"
out += [ nl, indent, "<", selfTag, ">" ]
worklist = self.__toklist
for i,res in enumerate(worklist):
if isinstance(res,ParseResults):
if i in namedItems:
out += [ res.asXML(namedItems[i],
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
out += [ res.asXML(None,
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
# individual token, see if there is a name for it
resTag = None
if i in namedItems:
resTag = namedItems[i]
if not resTag:
if namedItemsOnly:
continue
else:
resTag = "ITEM"
xmlBodyText = _xml_escape(_ustr(res))
out += [ nl, nextLevelIndent, "<", resTag, ">",
xmlBodyText,
"</", resTag, ">" ]
out += [ nl, indent, "</", selfTag, ">" ]
return "".join(out)
def __lookup(self,sub):
for k,vlist in self.__tokdict.items():
for v,loc in vlist:
if sub is v:
return k
return None
def getName(self):
"""Returns the results name for this token expression."""
if self.__name:
return self.__name
elif self.__parent:
par = self.__parent()
if par:
return par.__lookup(self)
else:
return None
elif (len(self) == 1 and
len(self.__tokdict) == 1 and
self.__tokdict.values()[0][0][1] in (0,-1)):
return self.__tokdict.keys()[0]
else:
return None
def dump(self,indent='',depth=0):
"""Diagnostic method for listing out the contents of a ParseResults.
Accepts an optional indent argument so that this string can be embedded
in a nested display of other data."""
out = []
out.append( indent+_ustr(self.asList()) )
keys = self.items()
keys.sort()
for k,v in keys:
if out:
out.append('\n')
out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
if isinstance(v,ParseResults):
if v.keys():
#~ out.append('\n')
out.append( v.dump(indent,depth+1) )
#~ out.append('\n')
else:
out.append(_ustr(v))
else:
out.append(_ustr(v))
#~ out.append('\n')
return "".join(out)
# add support for pickle protocol
def __getstate__(self):
return ( self.__toklist,
( self.__tokdict.copy(),
self.__parent is not None and self.__parent() or None,
self.__accumNames,
self.__name ) )
def __setstate__(self,state):
self.__toklist = state[0]
self.__tokdict, \
par, \
inAccumNames, \
self.__name = state[1]
self.__accumNames = {}
self.__accumNames.update(inAccumNames)
if par is not None:
self.__parent = wkref(par)
else:
self.__parent = None
def __dir__(self):
return dir(super(ParseResults,self)) + self.keys()
def col (loc,strg):
"""Returns current column within a string, counting newlines as line separators.
The first column is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing <TAB>s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
return (loc<len(strg) and strg[loc] == '\n') and 1 or loc - strg.rfind("\n", 0, loc)
def lineno(loc,strg):
"""Returns current line number within a string, counting newlines as line separators.
The first line is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing <TAB>s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
return strg.count("\n",0,loc) + 1
def line( loc, strg ):
"""Returns the line of text containing loc within a string, counting newlines as line separators.
"""
lastCR = strg.rfind("\n", 0, loc)
nextCR = strg.find("\n", loc)
if nextCR > 0:
return strg[lastCR+1:nextCR]
else:
return strg[lastCR+1:]
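# Illustrative sketch of these location helpers (hypothetical input):
#~ s = "abc\ndef"
#~ lineno(5, s)   # -> 2      (location 5 is the 'e' on the second line)
#~ col(5, s)      # -> 2
#~ line(5, s)     # -> 'def'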
def _defaultStartDebugAction( instring, loc, expr ):
print ("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
def _defaultExceptionDebugAction( instring, loc, expr, exc ):
print ("Exception raised:" + _ustr(exc))
def nullDebugAction(*args):
"""'Do-nothing' debug action, to suppress debugging output during parsing."""
pass
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
def setDefaultWhitespaceChars( chars ):
"""Overrides the default whitespace chars
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars)
def __init__( self, savelist=False ):
self.parseAction = list()
self.failAction = None
#~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
self.strRepr = None
self.resultsName = None
self.saveAsList = savelist
self.skipWhitespace = True
self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
self.copyDefaultWhiteChars = True
self.mayReturnEmpty = False # used when checking for left-recursion
self.keepTabs = False
self.ignoreExprs = list()
self.debug = False
self.streamlined = False
self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
self.errmsg = ""
self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
self.debugActions = ( None, None, None ) #custom debug actions
self.re = None
self.callPreparse = True # used to avoid redundant calls to preParse
self.callDuringTry = False
def copy( self ):
"""Make a copy of this ParserElement. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element."""
cpy = copy.copy( self )
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName( self, name ):
"""Define name for this expression, for use in debugging."""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self,"exception"):
self.exception.msg = self.errmsg
return self
def setResultsName( self, name, listAllMatches=False ):
"""Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original ParserElement object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
"""
newself = self.copy()
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
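# Illustrative sketch (hypothetical grammar): because setResultsName returns a
# copy, one base element can be reused under several names.
#~ integer = Word(nums)
#~ date = integer.setResultsName("year") + "/" + integer.setResultsName("month")
#~ date.parseString("2009/02").month   # -> '02'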
def setBreak(self,breakFlag = True):
"""Method to invoke the Python pdb debugger when this element is
about to be parsed. Set breakFlag to True to enable, False to
disable.
"""
if breakFlag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
pdb.set_trace()
return _parseMethod( instring, loc, doActions, callPreParse )
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse,"_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self
def _normalizeParseActionArgs( f ):
"""Internal method used to decorate parse actions that take fewer than 3 arguments,
so that all parse actions can be called as f(s,l,t)."""
STAR_ARGS = 4
try:
restore = None
if isinstance(f,type):
restore = f
f = f.__init__
if not _PY3K:
codeObj = f.func_code
else:
codeObj = f.code
if codeObj.co_flags & STAR_ARGS:
return f
numargs = codeObj.co_argcount
if not _PY3K:
if hasattr(f,"im_self"):
numargs -= 1
else:
if hasattr(f,"__self__"):
numargs -= 1
if restore:
f = restore
except AttributeError:
try:
if not _PY3K:
call_im_func_code = f.__call__.im_func.func_code
else:
call_im_func_code = f.__code__
# not a function, must be a callable object, get info from the
# im_func binding of its bound __call__ method
if call_im_func_code.co_flags & STAR_ARGS:
return f
numargs = call_im_func_code.co_argcount
if not _PY3K:
if hasattr(f.__call__,"im_self"):
numargs -= 1
else:
if hasattr(f.__call__,"__self__"):
numargs -= 1
except AttributeError:
if not _PY3K:
call_func_code = f.__call__.func_code
else:
call_func_code = f.__call__.__code__
# not a bound method, get info directly from __call__ method
if call_func_code.co_flags & STAR_ARGS:
return f
numargs = call_func_code.co_argcount
if not _PY3K:
if hasattr(f.__call__,"im_self"):
numargs -= 1
else:
if hasattr(f.__call__,"__self__"):
numargs -= 1
#~ print ("adding function %s with %d args" % (f.func_name,numargs))
if numargs == 3:
return f
else:
if numargs > 3:
def tmp(s,l,t):
return f(f.__call__.__self__, s,l,t)
if numargs == 2:
def tmp(s,l,t):
return f(l,t)
elif numargs == 1:
def tmp(s,l,t):
return f(t)
else: #~ numargs == 0:
def tmp(s,l,t):
return f()
try:
tmp.__name__ = f.__name__
except (AttributeError,TypeError):
# no need for special handling if attribute doesn't exist
pass
try:
tmp.__doc__ = f.__doc__
except (AttributeError,TypeError):
# no need for special handling if attribute doesn't exist
pass
try:
tmp.__dict__.update(f.__dict__)
except (AttributeError,TypeError):
# no need for special handling if attribute doesn't exist
pass
return tmp
_normalizeParseActionArgs = staticmethod(_normalizeParseActionArgs)
def setParseAction( self, *fns, **kwargs ):
"""Define action to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as fn(s,loc,toks),
fn(loc,toks), fn(toks), or just fn(), where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a ParseResults object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{parseString}<parseString>} for more information
on parsing strings containing <TAB>s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
self.parseAction = list(map(self._normalizeParseActionArgs, list(fns)))
self.callDuringTry = ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
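# Illustrative sketch of a parse action (hypothetical grammar): the action can
# return a replacement for the matched tokens.
#~ def to_int(s, loc, toks):
#~     return int(toks[0])
#~ integer = Word(nums).setParseAction(to_int)
#~ integer.parseString("42")[0]   # -> 42 (an int, not the string '42')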
def addParseAction( self, *fns, **kwargs ):
"""Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}."""
self.parseAction += list(map(self._normalizeParseActionArgs, list(fns)))
self.callDuringTry = self.callDuringTry or ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
def setFailAction( self, fn ):
"""Define action to perform if parsing fails at this expression.
Fail action fn is a callable function that takes the arguments
fn(s,loc,expr,err) where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw ParseFatalException
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
def _skipIgnorables( self, instring, loc ):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc,dummy = e._parse( instring, loc )
exprsFound = True
except ParseException:
pass
return loc
def preParse( self, instring, loc ):
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
return loc, []
def postParse( self, instring, loc, tokenlist ):
return tokenlist
#~ @profile
def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
debugging = ( self.debug ) #and doActions )
if debugging or self.failAction:
#~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
if (self.debugActions[0] ):
self.debugActions[0]( instring, loc, self )
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = loc
try:
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
except ParseBaseException, err:
#~ print ("Exception raised:", err)
if self.debugActions[2]:
self.debugActions[2]( instring, tokensStart, self, err )
if self.failAction:
self.failAction( instring, tokensStart, self, err )
raise
else:
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = loc
if self.mayIndexError or loc >= len(instring):
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
else:
loc,tokens = self.parseImpl( instring, preloc, doActions )
tokens = self.postParse( instring, loc, tokens )
retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
except ParseBaseException, err:
#~ print "Exception raised in user parse action:", err
if (self.debugActions[2] ):
self.debugActions[2]( instring, tokensStart, self, err )
raise
else:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
if debugging:
#~ print ("Matched",self,"->",retTokens.asList())
if (self.debugActions[1] ):
self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
return loc, retTokens
def tryParse( self, instring, loc ):
try:
return self._parse( instring, loc, doActions=False )[0]
except ParseFatalException:
raise ParseException( instring, loc, self.errmsg, self)
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
lookup = (self,instring,loc,callPreParse,doActions)
if lookup in ParserElement._exprArgCache:
value = ParserElement._exprArgCache[ lookup ]
if isinstance(value,Exception):
raise value
return value
else:
try:
value = self._parseNoCache( instring, loc, doActions, callPreParse )
ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy())
return value
except ParseBaseException, pe:
ParserElement._exprArgCache[ lookup ] = pe
raise
_parse = _parseNoCache
# argument cache for optimizing repeated calls when backtracking through recursive expressions
_exprArgCache = {}
def resetCache():
ParserElement._exprArgCache.clear()
resetCache = staticmethod(resetCache)
_packratEnabled = False
def enablePackrat():
"""Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
instead of re-executing parsing/validating code. Memoizing is done of
both valid results and parsing exceptions.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method ParserElement.enablePackrat(). If
your program uses psyco to "compile as you go", you must call
enablePackrat before calling psyco.full(). If you do not do this,
Python will crash. For best results, call enablePackrat() immediately
after importing pyparsing.
"""
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
ParserElement._parse = ParserElement._parseCache
enablePackrat = staticmethod(enablePackrat)
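# Illustrative sketch: packrat parsing is opt-in and is typically enabled once,
# immediately after import and before any grammar is used.
#~ import pyparsing
#~ pyparsing.ParserElement.enablePackrat()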
def parseString( self, instring, parseAll=False ):
"""Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
If you want the grammar to require that the entire input string be
successfully parsed, then set parseAll to True (equivalent to ending
the grammar with StringEnd()).
Note: parseString implicitly calls expandtabs() on the input string,
in order to report proper column numbers in parse actions.
If the input string contains tabs and
the grammar uses parse actions that use the loc argument to index into the
string being parsed, you can ensure you have a consistent view of the input
string by:
- calling parseWithTabs on your grammar before calling parseString
(see L{I{parseWithTabs}<parseWithTabs>})
- define your parse action using the full (s,loc,toks) signature, and
reference the input string using the parse action's s argument
- explicitly expand the tabs in your input string before calling
parseString
"""
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
#~ self.saveAsList = True
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
try:
loc, tokens = self._parse( instring, 0 )
if parseAll:
loc = self.preParse( instring, loc )
StringEnd()._parse( instring, loc )
except ParseBaseException, exc:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
else:
return tokens
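# Illustrative sketch (hypothetical grammar): with parseAll=True the whole
# input must be consumed, so trailing text raises a ParseException.
#~ greet = Word(alphas) + "," + Word(alphas) + "!"
#~ greet.parseString("Hello, World!")                      # -> ['Hello', ',', 'World', '!']
#~ greet.parseString("Hello, World! bye", parseAll=True)   # raises ParseException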
def scanString( self, instring, maxMatches=_MAX_INT ):
"""Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
maxMatches argument, to clip scanning after 'n' matches are found.
Note that the start and end locations are reported relative to the string
being parsed. See L{I{parseString}<parseString>} for more information on parsing
strings with embedded tabs."""
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = _ustr(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
try:
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn( instring, loc )
nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
except ParseException:
loc = preloc+1
else:
matches += 1
yield tokens, preloc, nextLoc
loc = nextLoc
except ParseBaseException, pe:
raise pe
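# Illustrative sketch (hypothetical input): scanString generates
# (tokens, startLoc, endLoc) triples for each match found in the input.
#~ [t[0] for t, s, e in Word(nums).scanString("a1 bb22 c333")]   # -> ['1', '22', '333']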
def transformString( self, instring ):
"""Extension to scanString, to modify matching text with modified tokens that may
be returned from a parse action. To use transformString, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking transformString() on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. transformString() returns the resulting transformed string."""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
try:
for t,s,e in self.scanString( instring ):
out.append( instring[lastE:s] )
if t:
if isinstance(t,ParseResults):
out += t.asList()
elif isinstance(t,list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
return "".join(map(_ustr,out))
except ParseBaseException, pe:
raise pe
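# Illustrative sketch (hypothetical grammar): a parse action rewrites each
# match, and transformString splices the rewritten text back into the input.
#~ shout = Word(alphas).setParseAction(lambda t: t[0].upper())
#~ shout.transformString("hello, world!")   # -> 'HELLO, WORLD!'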
def searchString( self, instring, maxMatches=_MAX_INT ):
"""Another extension to scanString, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
maxMatches argument, to clip searching after 'n' matches are found.
"""
try:
return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
except ParseBaseException, pe:
raise pe
def __add__(self, other ):
"""Implementation of + operator - returns And"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, other ] )
def __radd__(self, other ):
"""Implementation of + operator when left operand is not a ParserElement"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other + self
def __sub__(self, other):
"""Implementation of - operator, returns And with error stop"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, And._ErrorStop(), other ] )
def __rsub__(self, other ):
"""Implementation of - operator when left operand is not a ParserElement"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other - self
def __mul__(self,other):
if isinstance(other,int):
minElements, optElements = other,0
elif isinstance(other,tuple):
other = (other + (None, None))[:2]
if other[0] is None:
other = (0, other[1])
if isinstance(other[0],int) and other[1] is None:
if other[0] == 0:
return ZeroOrMore(self)
if other[0] == 1:
return OneOrMore(self)
else:
return self*other[0] + ZeroOrMore(self)
elif isinstance(other[0],int) and isinstance(other[1],int):
minElements, optElements = other
optElements -= minElements
else:
raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
else:
raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError("second tuple value must be greater or equal to first tuple value")
if minElements == optElements == 0:
raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
if (optElements):
def makeOptionalList(n):
if n>1:
return Optional(self + makeOptionalList(n-1))
else:
return Optional(self)
if minElements:
if minElements == 1:
ret = self + makeOptionalList(optElements)
else:
ret = And([self]*minElements) + makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
if minElements == 1:
ret = self
else:
ret = And([self]*minElements)
return ret
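# Illustrative sketch of the repetition shorthand implemented above
# (hypothetical element):
#~ Word(nums) * 3          # same as Word(nums) + Word(nums) + Word(nums)
#~ Word(nums) * (1, 3)     # match one to three numbers
#~ Word(nums) * (None, 3)  # match zero to three numbers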
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other ):
"""Implementation of | operator - returns MatchFirst"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return MatchFirst( [ self, other ] )
def __ror__(self, other ):
"""Implementation of | operator when left operand is not a ParserElement"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other | self
def __xor__(self, other ):
"""Implementation of ^ operator - returns Or"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Or( [ self, other ] )
def __rxor__(self, other ):
"""Implementation of ^ operator when left operand is not a ParserElement"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other ^ self
def __and__(self, other ):
"""Implementation of & operator - returns Each"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Each( [ self, other ] )
def __rand__(self, other ):
"""Implementation of & operator when left operand is not a ParserElement"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other & self
def __invert__( self ):
"""Implementation of ~ operator - returns NotAny"""
return NotAny( self )
def __call__(self, name):
"""Shortcut for setResultsName, with listAllMatches=default::
userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
could be written as::
userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
"""
return self.setResultsName(name)
def suppress( self ):
"""Suppresses the output of this ParserElement; useful to keep punctuation from
cluttering up returned output.
"""
return Suppress( self )
def leaveWhitespace( self ):
"""Disables the skipping of whitespace before matching the characters in the
ParserElement's defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
"""
self.skipWhitespace = False
return self
def setWhitespaceChars( self, chars ):
"""Overrides the default whitespace chars
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
def parseWithTabs( self ):
"""Overrides default behavior to expand <TAB>s to spaces before parsing the input string.
Must be called before parseString when the input grammar contains elements that
match <TAB> characters."""
self.keepTabs = True
return self
def ignore( self, other ):
"""Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
"""
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
self.ignoreExprs.append( other )
else:
self.ignoreExprs.append( Suppress( other ) )
return self
def setDebugActions( self, startAction, successAction, exceptionAction ):
"""Enable display of debugging messages while doing pattern matching."""
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
def setDebug( self, flag=True ):
"""Enable display of debugging messages while doing pattern matching.
Set flag to True to enable, False to disable."""
if flag:
self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
else:
self.debug = False
return self
def __str__( self ):
return self.name
def __repr__( self ):
return _ustr(self)
def streamline( self ):
self.streamlined = True
self.strRepr = None
return self
def checkRecursion( self, parseElementList ):
pass
def validate( self, validateTrace=[] ):
"""Check defined expressions for valid structure, check for infinite recursive definitions."""
self.checkRecursion( [] )
def parseFile( self, file_or_filename, parseAll=False ):
"""Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
f = open(file_or_filename, "rb")
file_contents = f.read()
f.close()
try:
return self.parseString(file_contents, parseAll)
except ParseBaseException, exc:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def getException(self):
return ParseException("",0,self.errmsg,self)
def __getattr__(self,aname):
if aname == "myException":
self.myException = ret = self.getException()
return ret
else:
raise AttributeError("no such attribute " + aname)
def __eq__(self,other):
if isinstance(other, ParserElement):
return self is other or self.__dict__ == other.__dict__
elif isinstance(other, basestring):
try:
self.parseString(_ustr(other), parseAll=True)
return True
except ParseBaseException:
return False
else:
return super(ParserElement,self)==other
def __ne__(self,other):
return not (self == other)
def __hash__(self):
return hash(id(self))
def __req__(self,other):
return self == other
def __rne__(self,other):
return not (self == other)
class Token(ParserElement):
"""Abstract ParserElement subclass, for defining atomic matching patterns."""
def __init__( self ):
super(Token,self).__init__( savelist=False )
#self.myException = ParseException("",0,"",self)
def setName(self, name):
s = super(Token,self).setName(name)
self.errmsg = "Expected " + self.name
#s.myException.msg = self.errmsg
return s
class Empty(Token):
"""An empty token, will always match."""
def __init__( self ):
super(Empty,self).__init__()
self.name = "Empty"
self.mayReturnEmpty = True
self.mayIndexError = False
class NoMatch(Token):
"""A token that will never match."""
def __init__( self ):
super(NoMatch,self).__init__()
self.name = "NoMatch"
self.mayReturnEmpty = True
self.mayIndexError = False
self.errmsg = "Unmatchable token"
#self.myException.msg = self.errmsg
def parseImpl( self, instring, loc, doActions=True ):
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class Literal(Token):
"""Token to exactly match a specified string."""
def __init__( self, matchString ):
super(Literal,self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Literal; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.__class__ = Empty
self.name = '"%s"' % _ustr(self.match)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
#self.myException.msg = self.errmsg
self.mayIndexError = False
# Performance tuning: this routine gets called a *lot*
# if this is a single character match string and the first character matches,
# short-circuit as quickly as possible, and avoid calling startswith
#~ @profile
def parseImpl( self, instring, loc, doActions=True ):
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) ):
return loc+self.matchLen, self.match
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
_L = Literal
class Keyword(Token):
"""Token to exactly match a specified string as a keyword, that is, it must be
immediately followed by a non-keyword character. Compare with Literal::
Literal("if") will match the leading 'if' in 'ifAndOnlyIf'.
Keyword("if") will not; it will only match the leading 'if in 'if x=1', or 'if(y==2)'
Accepts two optional constructor arguments in addition to the keyword string:
identChars is a string of characters that would be valid identifier characters,
defaulting to all alphanumerics + "_" and "$"; caseless allows case-insensitive
matching, default is False.
"""
DEFAULT_KEYWORD_CHARS = alphanums+"_$"
def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ):
super(Keyword,self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Keyword; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.name = '"%s"' % self.match
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
#self.myException.msg = self.errmsg
self.mayIndexError = False
self.caseless = caseless
if caseless:
self.caselessmatch = matchString.upper()
identChars = identChars.upper()
self.identChars = _str2dict(identChars)
def parseImpl( self, instring, loc, doActions=True ):
if self.caseless:
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
(loc == 0 or instring[loc-1].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
else:
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
(loc == 0 or instring[loc-1] not in self.identChars) ):
return loc+self.matchLen, self.match
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
def copy(self):
c = super(Keyword,self).copy()
c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
return c
def setDefaultKeywordChars( chars ):
"""Overrides the default Keyword chars
"""
Keyword.DEFAULT_KEYWORD_CHARS = chars
setDefaultKeywordChars = staticmethod(setDefaultKeywordChars)
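# Illustrative contrast between Literal and Keyword (hypothetical input):
#~ Literal("if").searchString("ifAndOnlyIf")   # finds a match - 'if' is a bare prefix
#~ Keyword("if").searchString("ifAndOnlyIf")   # finds no match - 'if' is not a whole word here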
class CaselessLiteral(Literal):
"""Token to match a specified string, ignoring case of letters.
Note: the matched results will always be in the case of the given
match string, NOT the case of the input text.
"""
def __init__( self, matchString ):
super(CaselessLiteral,self).__init__( matchString.upper() )
# Preserve the defining literal.
self.returnString = matchString
self.name = "'%s'" % self.returnString
self.errmsg = "Expected " + self.name
#self.myException.msg = self.errmsg
def parseImpl( self, instring, loc, doActions=True ):
if instring[ loc:loc+self.matchLen ].upper() == self.match:
return loc+self.matchLen, self.returnString
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class CaselessKeyword(Keyword):
def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ):
super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
def parseImpl( self, instring, loc, doActions=True ):
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class Word(Token):
"""Token for matching words composed of allowed character sets.
Defined with string containing all allowed initial characters,
an optional string containing allowed body characters (if omitted,
defaults to the initial character set), and an optional minimum,
maximum, and/or exact length. The default value for min is 1 (a
minimum value < 1 is not valid); the default values for max and exact
are 0, meaning no maximum or exact length restriction.
"""
def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False ):
super(Word,self).__init__()
self.initCharsOrig = initChars
self.initChars = _str2dict(initChars)
if bodyChars :
self.bodyCharsOrig = bodyChars
self.bodyChars = _str2dict(bodyChars)
else:
self.bodyCharsOrig = initChars
self.bodyChars = _str2dict(initChars)
self.maxSpecified = max > 0
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
#self.myException.msg = self.errmsg
self.mayIndexError = False
self.asKeyword = asKeyword
if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
if self.bodyCharsOrig == self.initCharsOrig:
self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
elif len(self.bodyCharsOrig) == 1:
self.reString = "%s[%s]*" % \
(re.escape(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
else:
self.reString = "[%s][%s]*" % \
(_escapeRegexRangeChars(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
if self.asKeyword:
self.reString = r"\b"+self.reString+r"\b"
try:
self.re = re.compile( self.reString )
except:
self.re = None
def parseImpl( self, instring, loc, doActions=True ):
if self.re:
result = self.re.match(instring,loc)
if not result:
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
loc = result.end()
return loc,result.group()
if not(instring[ loc ] in self.initChars):
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
start = loc
loc += 1
instrlen = len(instring)
bodychars = self.bodyChars
maxloc = start + self.maxLen
maxloc = min( maxloc, instrlen )
while loc < maxloc and instring[loc] in bodychars:
loc += 1
throwException = False
if loc - start < self.minLen:
throwException = True
if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
throwException = True
if self.asKeyword:
if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
throwException = True
if throwException:
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, instring[start:loc]
def __str__( self ):
try:
return super(Word,self).__str__()
except:
pass
if self.strRepr is None:
def charsAsStr(s):
if len(s)>4:
return s[:4]+"..."
else:
return s
if ( self.initCharsOrig != self.bodyCharsOrig ):
self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
else:
self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
return self.strRepr
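# Illustrative sketch (hypothetical identifiers): a Word matches a run of
# characters drawn from the given initial and body character sets.
#~ identifier = Word(alphas + "_", alphanums + "_")
#~ identifier.parseString("_tmp1")               # -> ['_tmp1']
#~ Word(nums, exact=4).parseString("20090217")   # -> ['2009'] (exactly four digits)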
class Regex(Token):
"""Token for matching strings that match a given regular expression.
Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
"""
def __init__( self, pattern, flags=0):
"""The parameters pattern and flags are passed to the re.compile() function as-is. See the Python re module for an explanation of the acceptable patterns and flags."""
super(Regex,self).__init__()
if len(pattern) == 0:
warnings.warn("null string passed to Regex; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.pattern = pattern
self.flags = flags
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
SyntaxWarning, stacklevel=2)
raise
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
#self.myException.msg = self.errmsg
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = self.re.match(instring,loc)
if not result:
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
loc = result.end()
d = result.groupdict()
ret = ParseResults(result.group())
if d:
for k in d:
ret[k] = d[k]
return loc,ret
def __str__( self ):
try:
return super(Regex,self).__str__()
except:
pass
if self.strRepr is None:
self.strRepr = "Re:(%s)" % repr(self.pattern)
return self.strRepr
class QuotedString(Token):
"""Token for matching strings that are delimited by quoting characters.
"""
def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None):
"""
Defined with the following parameters:
- quoteChar - string of one or more characters defining the quote delimiting string
- escChar - character to escape quotes, typically backslash (default=None)
- escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None)
- multiline - boolean indicating whether quotes can span multiple lines (default=False)
- unquoteResults - boolean indicating whether the matched text should be unquoted (default=True)
- endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=None => same as quoteChar)
"""
super(QuotedString,self).__init__()
# remove white space from quote chars - won't work anyway
quoteChar = quoteChar.strip()
if len(quoteChar) == 0:
warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
if endQuoteChar is None:
endQuoteChar = quoteChar
else:
endQuoteChar = endQuoteChar.strip()
if len(endQuoteChar) == 0:
warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
self.quoteChar = quoteChar
self.quoteCharLen = len(quoteChar)
self.firstQuoteChar = quoteChar[0]
self.endQuoteChar = endQuoteChar
self.endQuoteCharLen = len(endQuoteChar)
self.escChar = escChar
self.escQuote = escQuote
self.unquoteResults = unquoteResults
if multiline:
self.flags = re.MULTILINE | re.DOTALL
self.pattern = r'%s(?:[^%s%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
else:
self.flags = 0
self.pattern = r'%s(?:[^%s\n\r%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
if len(self.endQuoteChar) > 1:
self.pattern += (
'|(?:' + ')|(?:'.join(["%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
_escapeRegexRangeChars(self.endQuoteChar[i]))
for i in range(len(self.endQuoteChar)-1,0,-1)]) + ')'
)
if escQuote:
self.pattern += (r'|(?:%s)' % re.escape(escQuote))
if escChar:
self.pattern += (r'|(?:%s.)' % re.escape(escChar))
self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
SyntaxWarning, stacklevel=2)
raise
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
#self.myException.msg = self.errmsg
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
if not result:
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
loc = result.end()
ret = result.group()
if self.unquoteResults:
# strip off quotes
ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
if isinstance(ret,basestring):
# replace escaped characters
if self.escChar:
ret = re.sub(self.escCharReplacePattern,"\g<1>",ret)
# replace escaped quotes
if self.escQuote:
ret = ret.replace(self.escQuote, self.endQuoteChar)
return loc, ret
def __str__( self ):
try:
return super(QuotedString,self).__str__()
except:
pass
if self.strRepr is None:
self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
return self.strRepr
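# Illustrative usage sketch (assumes the names below are imported from this module):
#
#   sql_string = QuotedString("'", escQuote="''")
#   print(sql_string.parseString("'It''s here'"))    # -> ["It's here"]
#
# With the default unquoteResults=True the delimiting quotes are stripped and the
# escaped embedded quote ('') is collapsed back to a single quote.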
class CharsNotIn(Token):
"""Token for matching words composed of characters *not* in a given set.
Defined with a string containing all disallowed characters, and an optional
minimum, maximum, and/or exact length. The default value for min is 1 (a
minimum value < 1 is not valid); the default values for max and exact
are 0, meaning no maximum or exact length restriction.
"""
def __init__( self, notChars, min=1, max=0, exact=0 ):
super(CharsNotIn,self).__init__()
self.skipWhitespace = False
self.notChars = notChars
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = ( self.minLen == 0 )
#self.myException.msg = self.errmsg
self.mayIndexError = False
def parseImpl( self, instring, loc, doActions=True ):
if instring[loc] in self.notChars:
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
start = loc
loc += 1
notchars = self.notChars
maxlen = min( start+self.maxLen, len(instring) )
while loc < maxlen and \
(instring[loc] not in notchars):
loc += 1
if loc - start < self.minLen:
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, instring[start:loc]
def __str__( self ):
try:
return super(CharsNotIn, self).__str__()
except:
pass
if self.strRepr is None:
if len(self.notChars) > 4:
self.strRepr = "!W:(%s...)" % self.notChars[:4]
else:
self.strRepr = "!W:(%s)" % self.notChars
return self.strRepr
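# Illustrative usage sketch (assumes the names below are imported from this module):
#
#   csv_field = CharsNotIn(",")
#   print(csv_field.parseString("abc,def"))    # -> ['abc']
#
# Note that CharsNotIn does not skip leading whitespace, so embedded spaces become
# part of the matched field.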
class White(Token):
"""Special matching class for matching whitespace. Normally, whitespace is ignored
by pyparsing grammars. This class is included when some whitespace structures
are significant. Define with a string containing the whitespace characters to be
matched; default is " \\t\\r\\n". Also takes optional min, max, and exact arguments,
as defined for the Word class."""
whiteStrs = {
" " : "<SPC>",
"\t": "<TAB>",
"\n": "<LF>",
"\r": "<CR>",
"\f": "<FF>",
}
def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
super(White,self).__init__()
self.matchWhite = ws
self.setWhitespaceChars( "".join([c for c in self.whiteChars if c not in self.matchWhite]) )
#~ self.leaveWhitespace()
self.name = ("".join([White.whiteStrs[c] for c in self.matchWhite]))
self.mayReturnEmpty = True
self.errmsg = "Expected " + self.name
#self.myException.msg = self.errmsg
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
def parseImpl( self, instring, loc, doActions=True ):
if not(instring[ loc ] in self.matchWhite):
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
start = loc
loc += 1
maxloc = start + self.maxLen
maxloc = min( maxloc, len(instring) )
while loc < maxloc and instring[loc] in self.matchWhite:
loc += 1
if loc - start < self.minLen:
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, instring[start:loc]
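# Illustrative usage sketch (assumes the names below, including Word and alphas
# defined elsewhere in this module, are available): requiring a literal tab between
# two fields instead of letting it be skipped as ordinary whitespace.
#
#   tab_separated = Word(alphas) + White("\t").suppress() + Word(alphas)
#   print(tab_separated.parseString("alpha\tbeta"))   # -> ['alpha', 'beta']
#   tab_separated.parseString("alpha beta")           # raises ParseException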
class _PositionToken(Token):
def __init__( self ):
super(_PositionToken,self).__init__()
self.name=self.__class__.__name__
self.mayReturnEmpty = True
self.mayIndexError = False
class GoToColumn(_PositionToken):
"""Token to advance to a specific column of input text; useful for tabular report scraping."""
def __init__( self, colno ):
super(GoToColumn,self).__init__()
self.col = colno
def preParse( self, instring, loc ):
if col(loc,instring) != self.col:
instrlen = len(instring)
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
thiscol = col( loc, instring )
if thiscol > self.col:
raise ParseException( instring, loc, "Text not in expected column", self )
newloc = loc + self.col - thiscol
ret = instring[ loc: newloc ]
return newloc, ret
class LineStart(_PositionToken):
"""Matches if current position is at the beginning of a line within the parse string"""
def __init__( self ):
super(LineStart,self).__init__()
self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
self.errmsg = "Expected start of line"
#self.myException.msg = self.errmsg
def preParse( self, instring, loc ):
preloc = super(LineStart,self).preParse(instring,loc)
if instring[preloc] == "\n":
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
if not( loc==0 or
(loc == self.preParse( instring, 0 )) or
(instring[loc-1] == "\n") ): #col(loc, instring) != 1:
#~ raise ParseException( instring, loc, "Expected start of line" )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, []
class LineEnd(_PositionToken):
"""Matches if current position is at the end of a line within the parse string"""
def __init__( self ):
super(LineEnd,self).__init__()
self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
self.errmsg = "Expected end of line"
#self.myException.msg = self.errmsg
def parseImpl( self, instring, loc, doActions=True ):
if loc<len(instring):
if instring[loc] == "\n":
return loc+1, "\n"
else:
#~ raise ParseException( instring, loc, "Expected end of line" )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
elif loc == len(instring):
return loc+1, []
else:
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class StringStart(_PositionToken):
"""Matches if current position is at the beginning of the parse string"""
def __init__( self ):
super(StringStart,self).__init__()
self.errmsg = "Expected start of text"
#self.myException.msg = self.errmsg
def parseImpl( self, instring, loc, doActions=True ):
if loc != 0:
# see if entire string up to here is just whitespace and ignoreables
if loc != self.preParse( instring, 0 ):
#~ raise ParseException( instring, loc, "Expected start of text" )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, []
class StringEnd(_PositionToken):
"""Matches if current position is at the end of the parse string"""
def __init__( self ):
super(StringEnd,self).__init__()
self.errmsg = "Expected end of text"
#self.myException.msg = self.errmsg
def parseImpl( self, instring, loc, doActions=True ):
if loc < len(instring):
#~ raise ParseException( instring, loc, "Expected end of text" )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
elif loc == len(instring):
return loc+1, []
elif loc > len(instring):
return loc, []
else:
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class WordStart(_PositionToken):
"""Matches if the current position is at the beginning of a Word, and
is not preceded by any character in a given set of wordChars
(default=printables). To emulate the \\b behavior of regular expressions,
use WordStart(alphanums). WordStart will also match at the beginning of
the string being parsed, or at the beginning of a line.
"""
def __init__(self, wordChars = printables):
super(WordStart,self).__init__()
self.wordChars = _str2dict(wordChars)
self.errmsg = "Not at the start of a word"
def parseImpl(self, instring, loc, doActions=True ):
if loc != 0:
if (instring[loc-1] in self.wordChars or
instring[loc] not in self.wordChars):
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, []
class WordEnd(_PositionToken):
"""Matches if the current position is at the end of a Word, and
is not followed by any character in a given set of wordChars
(default=printables). To emulate the \\b behavior of regular expressions,
use WordEnd(alphanums). WordEnd will also match at the end of
the string being parsed, or at the end of a line.
"""
def __init__(self, wordChars = printables):
super(WordEnd,self).__init__()
self.wordChars = _str2dict(wordChars)
self.skipWhitespace = False
self.errmsg = "Not at the end of a word"
def parseImpl(self, instring, loc, doActions=True ):
instrlen = len(instring)
if instrlen>0 and loc<instrlen:
if (instring[loc] in self.wordChars or
instring[loc-1] not in self.wordChars):
#~ raise ParseException( instring, loc, "Expected end of word" )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, []
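# Illustrative usage sketch (assumes the names below, including Literal and
# alphanums defined elsewhere in this module, are available): bracketing an
# expression with WordStart/WordEnd(alphanums) approximates the regex \b boundary.
#
#   whole_word = WordStart(alphanums) + Literal("able") + WordEnd(alphanums)
#   for tokens, start, end in whole_word.scanString("a tablet is able"):
#       print(start, tokens)    # -> 12 ['able']   (no hit inside "tablet")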
class ParseExpression(ParserElement):
"""Abstract subclass of ParserElement, for combining and post-processing parsed tokens."""
def __init__( self, exprs, savelist = False ):
super(ParseExpression,self).__init__(savelist)
if isinstance( exprs, list ):
self.exprs = exprs
elif isinstance( exprs, basestring ):
self.exprs = [ Literal( exprs ) ]
else:
try:
self.exprs = list( exprs )
except TypeError:
self.exprs = [ exprs ]
self.callPreparse = False
def __getitem__( self, i ):
return self.exprs[i]
def append( self, other ):
self.exprs.append( other )
self.strRepr = None
return self
def leaveWhitespace( self ):
"""Extends leaveWhitespace defined in base class, and also invokes leaveWhitespace on
all contained expressions."""
self.skipWhitespace = False
self.exprs = [ e.copy() for e in self.exprs ]
for e in self.exprs:
e.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
else:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
return self
def __str__( self ):
try:
return super(ParseExpression,self).__str__()
except:
pass
if self.strRepr is None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
return self.strRepr
def streamline( self ):
super(ParseExpression,self).streamline()
for e in self.exprs:
e.streamline()
# collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
# but only if there are no parse actions or resultsNames on the nested And's
# (likewise for Or's and MatchFirst's)
if ( len(self.exprs) == 2 ):
other = self.exprs[0]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = other.exprs[:] + [ self.exprs[1] ]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
other = self.exprs[-1]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = self.exprs[:-1] + other.exprs[:]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
return self
def setResultsName( self, name, listAllMatches=False ):
ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
return ret
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
for e in self.exprs:
e.validate(tmp)
self.checkRecursion( [] )
class And(ParseExpression):
"""Requires all given ParseExpressions to be found in the given order.
Expressions may be separated by whitespace.
May be constructed using the '+' operator.
"""
class _ErrorStop(Empty):
def __init__(self, *args, **kwargs):
super(Empty,self).__init__(*args, **kwargs)
self.leaveWhitespace()
def __init__( self, exprs, savelist = True ):
super(And,self).__init__(exprs, savelist)
self.mayReturnEmpty = True
for e in self.exprs:
if not e.mayReturnEmpty:
self.mayReturnEmpty = False
break
self.setWhitespaceChars( exprs[0].whiteChars )
self.skipWhitespace = exprs[0].skipWhitespace
self.callPreparse = True
def parseImpl( self, instring, loc, doActions=True ):
# pass False as last arg to _parse for first element, since we already
# pre-parsed the string as part of our And pre-parsing
loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
errorStop = False
for e in self.exprs[1:]:
if isinstance(e, And._ErrorStop):
errorStop = True
continue
if errorStop:
try:
loc, exprtokens = e._parse( instring, loc, doActions )
except ParseSyntaxException:
raise
except ParseBaseException, pe:
raise ParseSyntaxException(pe)
except IndexError, ie:
raise ParseSyntaxException( ParseException(instring, len(instring), self.errmsg, self) )
else:
loc, exprtokens = e._parse( instring, loc, doActions )
if exprtokens or exprtokens.keys():
resultlist += exprtokens
return loc, resultlist
def __iadd__(self, other ):
if isinstance( other, basestring ):
other = Literal( other )
return self.append( other ) #And( [ self, other ] )
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
if not e.mayReturnEmpty:
break
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
return self.strRepr
class Or(ParseExpression):
"""Requires that at least one ParseExpression is found.
If two expressions match, the expression that matches the longest string will be used.
May be constructed using the '^' operator.
"""
def __init__( self, exprs, savelist = False ):
super(Or,self).__init__(exprs, savelist)
self.mayReturnEmpty = False
for e in self.exprs:
if e.mayReturnEmpty:
self.mayReturnEmpty = True
break
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxMatchLoc = -1
maxException = None
for e in self.exprs:
try:
loc2 = e.tryParse( instring, loc )
except ParseException, err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
else:
if loc2 > maxMatchLoc:
maxMatchLoc = loc2
maxMatchExp = e
if maxMatchLoc < 0:
if maxException is not None:
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
return maxMatchExp._parse( instring, loc, doActions )
def __ixor__(self, other ):
if isinstance( other, basestring ):
other = Literal( other )
return self.append( other ) #Or( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ^ ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
"""Requires that at least one ParseExpression is found.
If two expressions match, the first one listed is the one that will match.
May be constructed using the '|' operator.
"""
def __init__( self, exprs, savelist = False ):
super(MatchFirst,self).__init__(exprs, savelist)
if exprs:
self.mayReturnEmpty = False
for e in self.exprs:
if e.mayReturnEmpty:
self.mayReturnEmpty = True
break
else:
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxException = None
for e in self.exprs:
try:
ret = e._parse( instring, loc, doActions )
return ret
except ParseException, err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
# only got here if no expression matched, raise exception for match that made it the furthest
else:
if maxException is not None:
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ior__(self, other ):
if isinstance( other, basestring ):
other = Literal( other )
return self.append( other ) #MatchFirst( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " | ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
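# Illustrative usage sketch (assumes the names below are imported from this module):
# '^' (Or) commits to the longest matching alternative, while '|' (MatchFirst)
# commits to the first listed alternative that matches at all.
#
#   longest = Literal("3") ^ Literal("3.14")
#   first   = Literal("3") | Literal("3.14")
#   print(longest.parseString("3.14"))   # -> ['3.14']
#   print(first.parseString("3.14"))     # -> ['3']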
class Each(ParseExpression):
"""Requires all given ParseExpressions to be found, but in any order.
Expressions may be separated by whitespace.
May be constructed using the '&' operator.
"""
def __init__( self, exprs, savelist = True ):
super(Each,self).__init__(exprs, savelist)
self.mayReturnEmpty = True
for e in self.exprs:
if not e.mayReturnEmpty:
self.mayReturnEmpty = False
break
self.skipWhitespace = True
self.initExprGroups = True
def parseImpl( self, instring, loc, doActions=True ):
if self.initExprGroups:
self.optionals = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
self.required += self.multirequired
self.initExprGroups = False
tmpLoc = loc
tmpReqd = self.required[:]
tmpOpt = self.optionals[:]
matchOrder = []
keepMatching = True
while keepMatching:
tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
failed = []
for e in tmpExprs:
try:
tmpLoc = e.tryParse( instring, tmpLoc )
except ParseException:
failed.append(e)
else:
matchOrder.append(e)
if e in tmpReqd:
tmpReqd.remove(e)
elif e in tmpOpt:
tmpOpt.remove(e)
if len(failed) == len(tmpExprs):
keepMatching = False
if tmpReqd:
missing = ", ".join( [ _ustr(e) for e in tmpReqd ] )
raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
# add any unmatched Optionals, in case they have default values defined
matchOrder += list(e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt)
resultlist = []
for e in matchOrder:
loc,results = e._parse(instring,loc,doActions)
resultlist.append(results)
finalResults = ParseResults([])
for r in resultlist:
dups = {}
for k in r.keys():
if k in finalResults.keys():
tmp = ParseResults(finalResults[k])
tmp += ParseResults(r[k])
dups[k] = tmp
finalResults += ParseResults(r)
for k,v in dups.items():
finalResults[k] = v
return loc, finalResults
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " & ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
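# Illustrative usage sketch (assumes the names below, including Word and nums
# defined elsewhere in this module, are available): '&' builds an Each, so the
# pieces may appear in any order, and tokens come back in the order found in
# the input.
#
#   color = Literal("red") | Literal("blue")
#   size = Word(nums)
#   spec = color & size
#   print(spec.parseString("42 blue"))   # -> ['42', 'blue']
#   print(spec.parseString("red 7"))     # -> ['red', '7']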
class ParseElementEnhance(ParserElement):
"""Abstract subclass of ParserElement, for combining and post-processing parsed tokens."""
def __init__( self, expr, savelist=False ):
super(ParseElementEnhance,self).__init__(savelist)
if isinstance( expr, basestring ):
expr = Literal(expr)
self.expr = expr
self.strRepr = None
if expr is not None:
self.mayIndexError = expr.mayIndexError
self.mayReturnEmpty = expr.mayReturnEmpty
self.setWhitespaceChars( expr.whiteChars )
self.skipWhitespace = expr.skipWhitespace
self.saveAsList = expr.saveAsList
self.callPreparse = expr.callPreparse
self.ignoreExprs.extend(expr.ignoreExprs)
def parseImpl( self, instring, loc, doActions=True ):
if self.expr is not None:
return self.expr._parse( instring, loc, doActions, callPreParse=False )
else:
raise ParseException("",loc,self.errmsg,self)
def leaveWhitespace( self ):
self.skipWhitespace = False
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
else:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
return self
def streamline( self ):
super(ParseElementEnhance,self).streamline()
if self.expr is not None:
self.expr.streamline()
return self
def checkRecursion( self, parseElementList ):
if self in parseElementList:
raise RecursiveGrammarException( parseElementList+[self] )
subRecCheckList = parseElementList[:] + [ self ]
if self.expr is not None:
self.expr.checkRecursion( subRecCheckList )
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion( [] )
def __str__( self ):
try:
return super(ParseElementEnhance,self).__str__()
except:
pass
if self.strRepr is None and self.expr is not None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
return self.strRepr
class FollowedBy(ParseElementEnhance):
"""Lookahead matching of the given parse expression. FollowedBy
does *not* advance the parsing position within the input string, it only
verifies that the specified parse expression matches at the current
position. FollowedBy always returns a null token list."""
def __init__( self, expr ):
super(FollowedBy,self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
self.expr.tryParse( instring, loc )
return loc, []
class NotAny(ParseElementEnhance):
"""Lookahead to disallow matching with the given parse expression. NotAny
does *not* advance the parsing position within the input string, it only
verifies that the specified parse expression does *not* match at the current
position. Also, NotAny does *not* skip over leading whitespace. NotAny
always returns a null token list. May be constructed using the '~' operator."""
def __init__( self, expr ):
super(NotAny,self).__init__(expr)
#~ self.leaveWhitespace()
self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
self.mayReturnEmpty = True
self.errmsg = "Found unwanted token, "+_ustr(self.expr)
#self.myException = ParseException("",0,self.errmsg,self)
def parseImpl( self, instring, loc, doActions=True ):
try:
self.expr.tryParse( instring, loc )
except (ParseException,IndexError):
pass
else:
#~ raise ParseException(instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, []
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "~{" + _ustr(self.expr) + "}"
return self.strRepr
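# Illustrative usage sketch (assumes the names below, including Keyword, Word and
# alphas defined elsewhere in this module, are available): '~' builds a NotAny
# lookahead, e.g. an identifier that must not be the reserved word 'end'.
#
#   identifier = ~Keyword("end") + Word(alphas)
#   print(identifier.parseString("ending"))   # -> ['ending']
#   identifier.parseString("end")             # raises ParseException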
class ZeroOrMore(ParseElementEnhance):
"""Optional repetition of zero or more of the given expression."""
def __init__( self, expr ):
super(ZeroOrMore,self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
tokens = []
try:
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
while 1:
if hasIgnoreExprs:
preloc = self._skipIgnorables( instring, loc )
else:
preloc = loc
loc, tmptokens = self.expr._parse( instring, preloc, doActions )
if tmptokens or tmptokens.keys():
tokens += tmptokens
except (ParseException,IndexError):
pass
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]..."
return self.strRepr
def setResultsName( self, name, listAllMatches=False ):
ret = super(ZeroOrMore,self).setResultsName(name,listAllMatches)
ret.saveAsList = True
return ret
class OneOrMore(ParseElementEnhance):
"""Repetition of one or more of the given expression."""
def parseImpl( self, instring, loc, doActions=True ):
# must be at least one
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
try:
hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
while 1:
if hasIgnoreExprs:
preloc = self._skipIgnorables( instring, loc )
else:
preloc = loc
loc, tmptokens = self.expr._parse( instring, preloc, doActions )
if tmptokens or tmptokens.keys():
tokens += tmptokens
except (ParseException,IndexError):
pass
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + _ustr(self.expr) + "}..."
return self.strRepr
def setResultsName( self, name, listAllMatches=False ):
ret = super(OneOrMore,self).setResultsName(name,listAllMatches)
ret.saveAsList = True
return ret
class _NullToken(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __str__(self):
return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
"""Optional matching of the given expression.
A default return string can also be specified, if the optional expression
is not found.
"""
def __init__( self, exprs, default=_optionalNotMatched ):
super(Optional,self).__init__( exprs, savelist=False )
self.defaultValue = default
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
try:
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
except (ParseException,IndexError):
if self.defaultValue is not _optionalNotMatched:
if self.expr.resultsName:
tokens = ParseResults([ self.defaultValue ])
tokens[self.expr.resultsName] = self.defaultValue
else:
tokens = [ self.defaultValue ]
else:
tokens = []
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]"
return self.strRepr
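# Illustrative usage sketch (assumes the names below, including Word and nums
# defined elsewhere in this module, are available): the default value is returned
# only when the optional piece is absent.
#
#   zip_code = Word(nums, exact=5) + Optional("-" + Word(nums, exact=4), default="0000")
#   print(zip_code.parseString("12345-6789"))   # -> ['12345', '-', '6789']
#   print(zip_code.parseString("12345"))        # -> ['12345', '0000']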
class SkipTo(ParseElementEnhance):
"""Token for skipping over all undefined text until the matched expression is found.
If include is set to true, the matched expression is also parsed (the skipped text
and matched expression are returned as a 2-element list). The ignore
argument is used to define grammars (typically quoted strings and comments) that
might contain false matches.
"""
def __init__( self, other, include=False, ignore=None, failOn=None ):
super( SkipTo, self ).__init__( other )
self.ignoreExpr = ignore
self.mayReturnEmpty = True
self.mayIndexError = False
self.includeMatch = include
self.asList = False
if failOn is not None and isinstance(failOn, basestring):
self.failOn = Literal(failOn)
else:
self.failOn = failOn
self.errmsg = "No match found for "+_ustr(self.expr)
#self.myException = ParseException("",0,self.errmsg,self)
def parseImpl( self, instring, loc, doActions=True ):
startLoc = loc
instrlen = len(instring)
expr = self.expr
failParse = False
while loc <= instrlen:
try:
if self.failOn:
try:
self.failOn.tryParse(instring, loc)
except ParseBaseException:
pass
else:
failParse = True
raise ParseException(instring, loc, "Found expression " + str(self.failOn))
failParse = False
if self.ignoreExpr is not None:
while 1:
try:
loc = self.ignoreExpr.tryParse(instring,loc)
print "found ignoreExpr, advance to", loc
except ParseBaseException:
break
expr._parse( instring, loc, doActions=False, callPreParse=False )
skipText = instring[startLoc:loc]
if self.includeMatch:
loc,mat = expr._parse(instring,loc,doActions,callPreParse=False)
if mat:
skipRes = ParseResults( skipText )
skipRes += mat
return loc, [ skipRes ]
else:
return loc, [ skipText ]
else:
return loc, [ skipText ]
except (ParseException,IndexError):
if failParse:
raise
else:
loc += 1
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class Forward(ParseElementEnhance):
"""Forward declaration of an expression to be defined later -
used for recursive grammars, such as algebraic infix notation.
When the expression is known, it is assigned to the Forward variable using the '<<' operator.
Note: take care when assigning to Forward not to overlook precedence of operators.
Specifically, '|' has a lower precedence than '<<', so that::
fwdExpr << a | b | c
will actually be evaluated as::
(fwdExpr << a) | b | c
thereby leaving b and c out as parseable alternatives. It is recommended that you
explicitly group the values inserted into the Forward::
fwdExpr << (a | b | c)
"""
def __init__( self, other=None ):
super(Forward,self).__init__( other, savelist=False )
def __lshift__( self, other ):
if isinstance( other, basestring ):
other = Literal(other)
self.expr = other
self.mayReturnEmpty = other.mayReturnEmpty
self.strRepr = None
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
self.setWhitespaceChars( self.expr.whiteChars )
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
return None
def leaveWhitespace( self ):
self.skipWhitespace = False
return self
def streamline( self ):
if not self.streamlined:
self.streamlined = True
if self.expr is not None:
self.expr.streamline()
return self
def validate( self, validateTrace=[] ):
if self not in validateTrace:
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion([])
def __str__( self ):
if hasattr(self,"name"):
return self.name
self._revertClass = self.__class__
self.__class__ = _ForwardNoRecurse
try:
if self.expr is not None:
retString = _ustr(self.expr)
else:
retString = "None"
finally:
self.__class__ = self._revertClass
return self.__class__.__name__ + ": " + retString
def copy(self):
if self.expr is not None:
return super(Forward,self).copy()
else:
ret = Forward()
ret << self
return ret
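# Illustrative usage sketch (assumes the names below, including Word, Group,
# Suppress, ZeroOrMore and nums defined elsewhere in this module, are available):
# a Forward lets a grammar refer to itself; note the parentheses around the
# alternatives, since '<<' binds more tightly than '|' (see the docstring above).
#
#   LPAR, RPAR = Suppress("("), Suppress(")")
#   expr = Forward()
#   expr << (Word(nums) | Group(LPAR + ZeroOrMore(expr) + RPAR))
#   print(expr.parseString("(1 (2 3) 4)"))   # -> [['1', ['2', '3'], '4']]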
class _ForwardNoRecurse(Forward):
def __str__( self ):
return "..."
class TokenConverter(ParseElementEnhance):
"""Abstract subclass of ParseExpression, for converting parsed results."""
def __init__( self, expr, savelist=False ):
super(TokenConverter,self).__init__( expr )#, savelist )
self.saveAsList = False
class Upcase(TokenConverter):
"""Converter to upper case all matching tokens."""
def __init__(self, *args):
super(Upcase,self).__init__(*args)
warnings.warn("Upcase class is deprecated, use upcaseTokens parse action instead",
DeprecationWarning,stacklevel=2)
def postParse( self, instring, loc, tokenlist ):
return list(map( string.upper, tokenlist ))
class Combine(TokenConverter):
"""Converter to concatenate all matching tokens to a single string.
By default, the matching patterns must also be contiguous in the input string;
this can be disabled by specifying 'adjacent=False' in the constructor.
"""
def __init__( self, expr, joinString="", adjacent=True ):
super(Combine,self).__init__( expr )
# suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
if adjacent:
self.leaveWhitespace()
self.adjacent = adjacent
self.skipWhitespace = True
self.joinString = joinString
def ignore( self, other ):
if self.adjacent:
ParserElement.ignore(self, other)
else:
super( Combine, self).ignore( other )
return self
def postParse( self, instring, loc, tokenlist ):
retToks = tokenlist.copy()
del retToks[:]
retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
if self.resultsName and len(retToks.keys())>0:
return [ retToks ]
else:
return retToks
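# Illustrative usage sketch (assumes the names below, including Word and nums
# defined elsewhere in this module, are available): Combine glues the matched
# pieces into a single token and, by default, requires them to be adjacent.
#
#   real = Combine(Word(nums) + "." + Word(nums))
#   print(real.parseString("3.1416"))   # -> ['3.1416']
#   real.parseString("3. 1416")         # raises ParseException (pieces not adjacent)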
class Group(TokenConverter):
"""Converter to return the matched tokens as a list - useful for returning tokens of ZeroOrMore and OneOrMore expressions."""
def __init__( self, expr ):
super(Group,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
return [ tokenlist ]
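# Illustrative usage sketch (assumes the names below, including Word, alphas and
# nums defined elsewhere in this module, are available):
#
#   flat   = Word(alphas) + Word(nums) + Word(nums)
#   nested = Word(alphas) + Group(Word(nums) + Word(nums))
#   print(flat.parseString("ab 1 2"))     # -> ['ab', '1', '2']
#   print(nested.parseString("ab 1 2"))   # -> ['ab', ['1', '2']]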
class Dict(TokenConverter):
"""Converter to return a repetitive expression as a list, but also as a dictionary.
Each element can also be referenced using the first token in the expression as its key.
Useful for tabular report scraping when the first column can be used as an item key.
"""
def __init__( self, exprs ):
super(Dict,self).__init__( exprs )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
for i,tok in enumerate(tokenlist):
if len(tok) == 0:
continue
ikey = tok[0]
if isinstance(ikey,int):
ikey = _ustr(tok[0]).strip()
if len(tok)==1:
tokenlist[ikey] = _ParseResultsWithOffset("",i)
elif len(tok)==2 and not isinstance(tok[1],ParseResults):
tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
else:
dictvalue = tok.copy() #ParseResults(i)
del dictvalue[0]
if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.keys()):
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)

#!/usr/bin/env python
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It does also not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import copy
import getopt
import math # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata
import six
from six import iteritems as items, itervalues
from six.moves import range
_USAGE = """
Syntax: cpp_lint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--root=subdir]
[--linelength=digits]
<file> [file] ...
The style guidelines this tries to follow are those in
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the
extensions with the --extensions flag.
Flags:
output=vs7
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
root=subdir
The root directory used for deriving header guard CPP variable.
By default, the header guard CPP variable is calculated as the relative
path to the directory that contains .git, .hg, or .svn. When this flag
is specified, the relative path is calculated from the specified
directory. If the specified directory does not exist, this flag is
ignored.
Examples:
Assuming that src/.git exists, the header guard CPP variables for
src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
--root=chrome/browser => UI_BROWSER_H_
linelength=digits
This is the allowed line length for the project. The default value is
80 characters.
Examples:
--linelength=120
extensions=extension,extension,...
The allowed file extensions that cpplint will check
Examples:
--extensions=hpp,cpp
"""
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
_ERROR_CATEGORIES = [
'build/class',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_alpha',
'build/include_dir',
'build/include_order',
'build/include_what_you_use',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'caffe/alt_fn',
'caffe/data_layer_setup',
'caffe/random_fn',
'legal/copyright',
'readability/alt_tokens',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/function',
'readability/multiline_comment',
'readability/multiline_string',
'readability/namespace',
'readability/nolint',
'readability/nul',
'readability/streams',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/string',
'runtime/threadsafe_fn',
'runtime/vlog',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
'whitespace/empty_conditional_body',
'whitespace/empty_loop_body',
'whitespace/end_of_line',
'whitespace/ending_newline',
'whitespace/forcolon',
'whitespace/indent',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo'
]
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add categories here that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = [
'-build/include_dir',
'-readability/todo',
]
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# C++ headers
_CPP_HEADERS = frozenset([
# Legacy
'algobase.h',
'algo.h',
'alloc.h',
'builtinbuf.h',
'bvector.h',
'complex.h',
'defalloc.h',
'deque.h',
'editbuf.h',
'fstream.h',
'function.h',
'hash_map',
'hash_map.h',
'hash_set',
'hash_set.h',
'hashtable.h',
'heap.h',
'indstream.h',
'iomanip.h',
'iostream.h',
'istream.h',
'iterator.h',
'list.h',
'map.h',
'multimap.h',
'multiset.h',
'ostream.h',
'pair.h',
'parsestream.h',
'pfstream.h',
'procbuf.h',
'pthread_alloc',
'pthread_alloc.h',
'rope',
'rope.h',
'ropeimpl.h',
'set.h',
'slist',
'slist.h',
'stack.h',
'stdiostream.h',
'stl_alloc.h',
'stl_relops.h',
'streambuf.h',
'stream.h',
'strfile.h',
'strstream.h',
'tempbuf.h',
'tree.h',
'type_traits.h',
'vector.h',
# 17.6.1.2 C++ library headers
'algorithm',
'array',
'atomic',
'bitset',
'chrono',
'codecvt',
'complex',
'condition_variable',
'deque',
'exception',
'forward_list',
'fstream',
'functional',
'future',
'initializer_list',
'iomanip',
'ios',
'iosfwd',
'iostream',
'istream',
'iterator',
'limits',
'list',
'locale',
'map',
'memory',
'mutex',
'new',
'numeric',
'ostream',
'queue',
'random',
'ratio',
'regex',
'set',
'sstream',
'stack',
'stdexcept',
'streambuf',
'string',
'strstream',
'system_error',
'thread',
'tuple',
'typeindex',
'typeinfo',
'type_traits',
'unordered_map',
'unordered_set',
'utility',
'valarray',
'vector',
# 17.6.1.2 C++ headers for C library facilities
'cassert',
'ccomplex',
'cctype',
'cerrno',
'cfenv',
'cfloat',
'cinttypes',
'ciso646',
'climits',
'clocale',
'cmath',
'csetjmp',
'csignal',
'cstdalign',
'cstdarg',
'cstdbool',
'cstddef',
'cstdint',
'cstdio',
'cstdlib',
'cstring',
'ctgmath',
'ctime',
'cuchar',
'cwchar',
'cwctype',
])
# Assertion macros. These are defined in base/logging.h and
# testing/base/gunit.h. Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE_M', 'EXPECT_TRUE',
'ASSERT_TRUE_M', 'ASSERT_TRUE',
'EXPECT_FALSE_M', 'EXPECT_FALSE',
'ASSERT_FALSE_M', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
_CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
# Alternative tokens and their replacements. For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
#
# Digraphs (such as '%:') are not included here since it's a mess to
# match those on a word boundary.
_ALT_TOKEN_REPLACEMENT = {
'and': '&&',
'bitor': '|',
'or': '||',
'xor': '^',
'compl': '~',
'bitand': '&',
'and_eq': '&=',
'or_eq': '|=',
'xor_eq': '^=',
'not': '!',
'not_eq': '!='
}
# Compile regular expression that matches all the above keywords. The "[ =()]"
# bit is meant to avoid matching these keywords outside of boolean expressions.
#
# False positives include C-style multi-line comments and multi-line strings
# but those have always been troublesome for cpplint.
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
# These constants define the current inline assembly state
_NO_ASM = 0 # Outside of inline assembly block
_INSIDE_ASM = 1 # Inside inline assembly block
_END_ASM = 2 # Last line of inline assembly block
_BLOCK_ASM = 3 # The whole block is an inline assembly block
# Match start of assembly blocks
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
r'(?:\s+(volatile|__volatile__))?'
r'\s*[{(]')
_regexp_compile_cache = {}
# Finds occurrences of NOLINT[_NEXT_LINE] or NOLINT[_NEXT_LINE](...).
_RE_SUPPRESSION = re.compile(r'\bNOLINT(_NEXT_LINE)?\b(\([^)]*\))?')
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
# Finds Copyright.
_RE_COPYRIGHT = re.compile(r'Copyright')
# The root directory used for deriving header guard CPP variable.
# This is set by --root flag.
_root = None
# The allowed line length of files.
# This is set by --linelength flag.
_line_length = 80
# The allowed extensions for file names
# This is set by --extensions flag.
_valid_extensions = set(['cc', 'h', 'cpp', 'hpp', 'cu', 'cuh'])
def ParseNolintSuppressions(filename, raw_line, linenum, error):
"""Updates the global list of error-suppressions.
Parses any NOLINT comments on the current line, updating the global
error_suppressions store. Reports an error if the NOLINT comment
was malformed.
Args:
filename: str, the name of the input file.
raw_line: str, the line of input text, with comments.
linenum: int, the number of the current line.
error: function, an error handler.
"""
# FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*).
matched = _RE_SUPPRESSION.search(raw_line)
if matched:
if matched.group(1) == '_NEXT_LINE':
linenum += 1
category = matched.group(2)
if category in (None, '(*)'): # => "suppress all"
_error_suppressions.setdefault(None, set()).add(linenum)
else:
if category.startswith('(') and category.endswith(')'):
category = category[1:-1]
if category in _ERROR_CATEGORIES:
_error_suppressions.setdefault(category, set()).add(linenum)
else:
error(filename, linenum, 'readability/nolint', 5,
'Unknown NOLINT error category: %s' % category)
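# Worked example of the suppression syntax handled above (hypothetical source lines):
#   'int x;  // NOLINT'                       suppresses every category on that line
#   'long y;  // NOLINT(runtime/int)'         suppresses only runtime/int there
#   '// NOLINT_NEXT_LINE(whitespace/braces)'  suppresses whitespace/braces on the next line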
def ResetNolintSuppressions():
"Resets the set of NOLINT suppressions to empty."
_error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
"""Returns true if the specified error category is suppressed on this line.
Consults the global error_suppressions map populated by
ParseNolintSuppressions/ResetNolintSuppressions.
Args:
category: str, the category of the error.
linenum: int, the current line number.
Returns:
bool, True iff the error should be suppressed due to a NOLINT comment.
"""
return (linenum in _error_suppressions.get(category, set()) or
linenum in _error_suppressions.get(None, set()))
def Match(pattern, s):
"""Matches the string with the pattern, caching the compiled regexp."""
# The regexp compilation caching is inlined in both Match and Search for
# performance reasons; factoring it out into a separate function turns out
# to be noticeably expensive.
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].match(s)
def ReplaceAll(pattern, rep, s):
"""Replaces instances of pattern in a string with a replacement.
The compiled regex is kept in a cache shared by Match and Search.
Args:
pattern: regex pattern
rep: replacement text
s: search string
Returns:
string with replacements made (or original string if no replacements)
"""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].sub(rep, s)
def Search(pattern, s):
"""Searches the string for the pattern, caching the compiled regexp."""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].search(s)
class _IncludeState(dict):
"""Tracks line numbers for includes, and the order in which includes appear.
As a dict, an _IncludeState object serves as a mapping between include
filename and line number on which that file was included.
Call CheckNextIncludeOrder() once for each header in the file, passing
in the type constants defined above. Calls in an illegal order will
raise an _IncludeError with an appropriate error message.
"""
# self._section will move monotonically through this set. If it ever
# needs to move backwards, CheckNextIncludeOrder will raise an error.
_INITIAL_SECTION = 0
_MY_H_SECTION = 1
_C_SECTION = 2
_CPP_SECTION = 3
_OTHER_H_SECTION = 4
_TYPE_NAMES = {
_C_SYS_HEADER: 'C system header',
_CPP_SYS_HEADER: 'C++ system header',
_LIKELY_MY_HEADER: 'header this file implements',
_POSSIBLE_MY_HEADER: 'header this file may implement',
_OTHER_HEADER: 'other header',
}
_SECTION_NAMES = {
_INITIAL_SECTION: "... nothing. (This can't be an error.)",
_MY_H_SECTION: 'a header this file implements',
_C_SECTION: 'C system header',
_CPP_SECTION: 'C++ system header',
_OTHER_H_SECTION: 'other header',
}
def __init__(self):
dict.__init__(self)
self.ResetSection()
def ResetSection(self):
# The name of the current section.
self._section = self._INITIAL_SECTION
# The path of last found header.
self._last_header = ''
def SetLastHeader(self, header_path):
self._last_header = header_path
def CanonicalizeAlphabeticalOrder(self, header_path):
"""Returns a path canonicalized for alphabetical comparison.
- replaces "-" with "_" so they both cmp the same.
- removes '-inl' since we don't require them to be after the main header.
- lowercase everything, just in case.
Args:
header_path: Path to be canonicalized.
Returns:
Canonicalized path.
"""
return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
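  # For example, 'Foo-Bar-inl.h' canonicalizes to 'foo_bar.h', so '-inl' suffixes
  # and '-' vs '_' spelling differences do not affect the ordering check.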
def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
"""Check if a header is in alphabetical order with the previous header.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
header_path: Canonicalized header to be checked.
Returns:
Returns true if the header is in alphabetical order.
"""
# If previous section is different from current section, _last_header will
# be reset to empty string, so it's always less than current header.
#
# If previous line was a blank line, assume that the headers are
# intentionally sorted the way they are.
if (self._last_header > header_path and
not Match(r'^\s*$', clean_lines.elided[linenum - 1])):
return False
return True
def CheckNextIncludeOrder(self, header_type):
"""Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong.
"""
error_message = ('Found %s after %s' %
(self._TYPE_NAMES[header_type],
self._SECTION_NAMES[self._section]))
last_section = self._section
if header_type == _C_SYS_HEADER:
if self._section <= self._C_SECTION:
self._section = self._C_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _CPP_SYS_HEADER:
if self._section <= self._CPP_SECTION:
self._section = self._CPP_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _LIKELY_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
self._section = self._OTHER_H_SECTION
elif header_type == _POSSIBLE_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
# This will always be the fallback because we're not sure
# enough that the header is associated with this file.
self._section = self._OTHER_H_SECTION
else:
assert header_type == _OTHER_HEADER
self._section = self._OTHER_H_SECTION
if last_section != self._section:
self._last_header = ''
return ''
class _CppLintState(object):
"""Maintains module-wide state.."""
def __init__(self):
self.verbose_level = 1 # global setting.
self.error_count = 0 # global count of reported errors
# filters to apply when emitting error messages
self.filters = _DEFAULT_FILTERS[:]
self.counting = 'total' # In what way are we counting errors?
self.errors_by_category = {} # string to int dict storing error counts
# output format:
# "emacs" - format that emacs can parse (default)
# "vs7" - format that Microsoft Visual Studio 7 can parse
self.output_format = 'emacs'
def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format
def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
def SetCountingStyle(self, counting_style):
"""Sets the module's counting options."""
self.counting = counting_style
def SetFilters(self, filters):
"""Sets the error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "+whitespace/indent").
Each filter should start with + or -; else we die.
Raises:
ValueError: The comma-separated filters did not all start with '+' or '-'.
E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
"""
# Default filters always have less priority than the flag ones.
self.filters = _DEFAULT_FILTERS[:]
for filt in filters.split(','):
clean_filt = filt.strip()
if clean_filt:
self.filters.append(clean_filt)
for filt in self.filters:
if not (filt.startswith('+') or filt.startswith('-')):
raise ValueError('Every filter in --filters must start with + or -'
' (%s does not)' % filt)
def ResetErrorCounts(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
self.errors_by_category = {}
def IncrementErrorCount(self, category):
"""Bumps the module's error statistic."""
self.error_count += 1
if self.counting in ('toplevel', 'detailed'):
if self.counting != 'detailed':
category = category.split('/')[0]
if category not in self.errors_by_category:
self.errors_by_category[category] = 0
self.errors_by_category[category] += 1
def PrintErrorCounts(self):
"""Print a summary of errors by category, and the total."""
for category, count in items(self.errors_by_category):
sys.stderr.write('Category \'%s\' errors found: %d\n' %
(category, count))
sys.stderr.write('Total errors found: %d\n' % self.error_count)
_cpplint_state = _CppLintState()
def _OutputFormat():
"""Gets the module's output format."""
return _cpplint_state.output_format
def _SetOutputFormat(output_format):
"""Sets the module's output format."""
_cpplint_state.SetOutputFormat(output_format)
def _VerboseLevel():
"""Returns the module's verbosity setting."""
return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
"""Sets the module's verbosity, and returns the previous setting."""
return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
"""Sets the module's counting options."""
_cpplint_state.SetCountingStyle(level)
def _Filters():
"""Returns the module's list of output filters, as a list."""
return _cpplint_state.filters
def _SetFilters(filters):
"""Sets the module's error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.SetFilters(filters)
class _FunctionState(object):
"""Tracks current function name and the number of lines in its body."""
_NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
_TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
def __init__(self):
self.in_a_function = False
self.lines_in_function = 0
self.current_function = ''
def Begin(self, function_name):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
"""
self.in_a_function = True
self.lines_in_function = 0
self.current_function = function_name
def Count(self):
"""Count line in current function body."""
if self.in_a_function:
self.lines_in_function += 1
def Check(self, error, filename, linenum):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
"""
if Match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2**_VerboseLevel()
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
# 250 => 0, 500 => 1, 1000 => 2, 2000 => 3, 4000 => 4, 8000 => 5, ...
if error_level > 5:
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger))
def End(self):
"""Stop analyzing function body."""
self.in_a_function = False
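# Illustrative sketch of the fn_size scaling rule implemented by
# _FunctionState.Check above; the helper below is hypothetical and not used by
# the checker. It relies on the module-level 'import math' that Check() also
# uses.
def _example_fn_size_error_level(lines_in_function, base_trigger=250,
                                 verbose_level=0):
  """Returns the error level Check() would report, or None if under trigger."""
  trigger = base_trigger * 2**verbose_level
  if lines_in_function <= trigger:
    return None
  return min(5, int(math.log(float(lines_in_function) / base_trigger, 2)))
# For example, _example_fn_size_error_level(600) == 1, because 600 > 250 and
# int(log2(600 / 250)) == 1.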
class _IncludeError(Exception):
"""Indicates a problem with the include order in a file."""
pass
class FileInfo:
"""Provides utility functions for filenames.
FileInfo provides easy access to the components of a file's path
relative to the project root.
"""
def __init__(self, filename):
self._filename = filename
def FullName(self):
"""Make Windows paths like Unix."""
return os.path.abspath(self._filename).replace('\\', '/')
def RepositoryName(self):
"""FullName after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
the name so that we get header guards that don't include things like
"C:\Documents and Settings\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors.
"""
fullname = self.FullName()
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
if os.path.exists(os.path.join(project_dir, ".svn")):
# If there's a .svn file in the current directory, we recursively look
# up the directory tree for the top of the SVN checkout
root_dir = project_dir
one_up_dir = os.path.dirname(root_dir)
while os.path.exists(os.path.join(one_up_dir, ".svn")):
root_dir = os.path.dirname(root_dir)
one_up_dir = os.path.dirname(one_up_dir)
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
# searching up from the current path.
root_dir = os.path.dirname(fullname)
while (root_dir != os.path.dirname(root_dir) and
not os.path.exists(os.path.join(root_dir, ".git")) and
not os.path.exists(os.path.join(root_dir, ".hg")) and
not os.path.exists(os.path.join(root_dir, ".svn"))):
root_dir = os.path.dirname(root_dir)
if (os.path.exists(os.path.join(root_dir, ".git")) or
os.path.exists(os.path.join(root_dir, ".hg")) or
os.path.exists(os.path.join(root_dir, ".svn"))):
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Don't know what to do; header guard warnings may be wrong...
return fullname
def Split(self):
"""Splits the file into the directory, basename, and extension.
For 'chrome/browser/browser.cc', Split() would
return ('chrome/browser', 'browser', '.cc')
Returns:
A tuple of (directory, basename, extension).
"""
googlename = self.RepositoryName()
project, rest = os.path.split(googlename)
return (project,) + os.path.splitext(rest)
def BaseName(self):
"""File base name - text after the final slash, before the final period."""
return self.Split()[1]
def Extension(self):
"""File extension - text following the final period."""
return self.Split()[2]
def NoExtension(self):
"""File has no source file extension."""
return '/'.join(self.Split()[0:2])
def IsSource(self):
"""File has a source file extension."""
return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
def _ShouldPrintError(category, confidence, linenum):
"""If confidence >= verbose, category passes filter and is not suppressed."""
# There are three ways we might decide not to print an error message:
# a "NOLINT(category)" comment appears in the source,
# the verbosity level isn't high enough, or the filters filter it out.
if IsErrorSuppressedByNolint(category, linenum):
return False
if confidence < _cpplint_state.verbose_level:
return False
is_filtered = False
for one_filter in _Filters():
if one_filter.startswith('-'):
if category.startswith(one_filter[1:]):
is_filtered = True
elif one_filter.startswith('+'):
if category.startswith(one_filter[1:]):
is_filtered = False
else:
assert False # should have been checked for in SetFilter.
if is_filtered:
return False
return True
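# Illustrative sketch of the filter semantics above; the helper is hypothetical
# and unused. Later filters override earlier ones, so with
# --filters=-whitespace,+whitespace/braces the category 'whitespace/braces' is
# still printed while 'whitespace/indent' is suppressed.
def _example_is_filtered(category, filters):
  """Mirrors the loop in _ShouldPrintError for a list of +/- filters."""
  is_filtered = False
  for one_filter in filters:
    if one_filter.startswith('-') and category.startswith(one_filter[1:]):
      is_filtered = True
    elif one_filter.startswith('+') and category.startswith(one_filter[1:]):
      is_filtered = False
  return is_filtered
# _example_is_filtered('whitespace/braces',
#                      ['-whitespace', '+whitespace/braces']) -> False
# _example_is_filtered('whitespace/indent',
#                      ['-whitespace', '+whitespace/braces']) -> True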
def Error(filename, linenum, category, confidence, message):
"""Logs the fact we've found a lint error.
We log where the error was found, and also our confidence in the error,
that is, how certain we are this is a legitimate style regression, and
not a misidentification or a use that's sometimes justified.
False positives can be suppressed by the use of
"cpplint(category)" comments on the offending line. These are
parsed into _error_suppressions.
Args:
filename: The name of the file containing the error.
linenum: The number of the line containing the error.
category: A string used to describe the "category" this bug
falls under: "whitespace", say, or "runtime". Categories
may have a hierarchy separated by slashes: "whitespace/indent".
confidence: A number from 1-5 representing a confidence score for
the error, with 5 meaning that we are certain of the problem,
and 1 meaning that it could be a legitimate construct.
message: The error message.
"""
if _ShouldPrintError(category, confidence, linenum):
_cpplint_state.IncrementErrorCount(category)
if _cpplint_state.output_format == 'vs7':
sys.stderr.write('%s(%s): %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
elif _cpplint_state.output_format == 'eclipse':
sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
else:
sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Matches strings. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
# Matches characters. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
# Matches multi-line C++ comments.
# This RE is a little more complicated than one might expect, because we
# have to be careful about removing whitespace so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on left side but only if there's a non-character
# on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
r"""(\s*/\*.*\*/\s*$|
/\*.*\*/\s+|
\s+/\*.*\*/(?=\W)|
/\*.*\*/)""", re.VERBOSE)
def IsCppString(line):
"""Does line terminate so, that the next symbol is in string constant.
This function does not consider single-line nor multi-line comments.
Args:
    line: a partial line of code, from the start of the line up to some position.
Returns:
True, if next character appended to 'line' is inside a
string constant.
"""
line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
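# Expected behavior of the quote-parity rule above (illustrative examples,
# assuming the line contains no comments):
#   IsCppString('int a = 0;')   -> False  (no quotes)
#   IsCppString('s = "abc";')   -> False  (quotes balanced)
#   IsCppString('s = "abc')     -> True   (unterminated string)
#   IsCppString('s = "a\\"b')   -> True   (escaped quote does not close it)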
def CleanseRawStrings(raw_lines):
"""Removes C++11 raw strings from lines.
Before:
static const char kData[] = R"(
multi-line string
)";
After:
static const char kData[] = ""
(replaced by blank line)
"";
Args:
raw_lines: list of raw lines.
Returns:
list of lines with C++11 raw strings replaced by empty strings.
"""
delimiter = None
lines_without_raw_strings = []
for line in raw_lines:
if delimiter:
# Inside a raw string, look for the end
end = line.find(delimiter)
if end >= 0:
# Found the end of the string, match leading space for this
# line and resume copying the original lines, and also insert
# a "" on the last line.
leading_space = Match(r'^(\s*)\S', line)
line = leading_space.group(1) + '""' + line[end + len(delimiter):]
delimiter = None
else:
# Haven't found the end yet, append a blank line.
line = ''
else:
# Look for beginning of a raw string.
# See 2.14.15 [lex.string] for syntax.
matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
if matched:
delimiter = ')' + matched.group(2) + '"'
end = matched.group(3).find(delimiter)
if end >= 0:
# Raw string ended on same line
line = (matched.group(1) + '""' +
matched.group(3)[end + len(delimiter):])
delimiter = None
else:
# Start of a multi-line raw string
line = matched.group(1) + '""'
lines_without_raw_strings.append(line)
# TODO(unknown): if delimiter is not None here, we might want to
# emit a warning for unterminated string.
return lines_without_raw_strings
def FindNextMultiLineCommentStart(lines, lineix):
"""Find the beginning marker for a multiline comment."""
while lineix < len(lines):
if lines[lineix].strip().startswith('/*'):
# Only return this marker if the comment goes beyond this line
if lines[lineix].strip().find('*/', 2) < 0:
return lineix
lineix += 1
return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
"""We are inside a comment, find the end marker."""
while lineix < len(lines):
if lines[lineix].strip().endswith('*/'):
return lineix
lineix += 1
return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
"""Clears a range of lines for multi-line comments."""
# Having // dummy comments makes the lines non-empty, so we will not get
# unnecessary blank line warnings later in the code.
for i in range(begin, end):
lines[i] = '// dummy'
def RemoveMultiLineComments(filename, lines, error):
"""Removes multiline (c-style) comments from lines."""
lineix = 0
while lineix < len(lines):
lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
if lineix_begin >= len(lines):
return
lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
if lineix_end >= len(lines):
error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
'Could not find end of multi-line comment')
return
RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
lineix = lineix_end + 1
def CleanseComments(line):
"""Removes //-comments and single-line C-style /* */ comments.
Args:
line: A line of C++ source.
Returns:
The line with single-line comments removed.
"""
commentpos = line.find('//')
if commentpos != -1 and not IsCppString(line[:commentpos]):
line = line[:commentpos].rstrip()
# get rid of /* ... */
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
class CleansedLines(object):
"""Holds 3 copies of all lines with different preprocessing applied to them.
1) elided member contains lines without strings and comments,
2) lines member contains lines without comments, and
3) raw_lines member contains all the lines without processing.
All these three members are of <type 'list'>, and of the same length.
"""
def __init__(self, lines):
self.elided = []
self.lines = []
self.raw_lines = lines
self.num_lines = len(lines)
self.lines_without_raw_strings = CleanseRawStrings(lines)
for linenum in range(len(self.lines_without_raw_strings)):
self.lines.append(CleanseComments(
self.lines_without_raw_strings[linenum]))
elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
self.elided.append(CleanseComments(elided))
def NumLines(self):
"""Returns the number of lines represented."""
return self.num_lines
@staticmethod
def _CollapseStrings(elided):
"""Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings.
"""
if not _RE_PATTERN_INCLUDE.match(elided):
# Remove escaped characters first to make quote/single quote collapsing
# basic. Things that look like escaped characters shouldn't occur
# outside of strings and chars.
elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
return elided
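# Illustrative example of what CleansedLines produces for the raw line
#   printf("// not a comment");  // real comment
# raw_lines keeps it unchanged; elided first collapses the string literal and
# then strips the trailing comment, leaving roughly
#   printf("");
# The 'lines' member strips comments without collapsing strings, so it only
# removes a // comment when the first // on the line is outside any string.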
def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
"""Find the position just after the matching endchar.
Args:
line: a CleansedLines line.
startpos: start searching at this position.
depth: nesting level at startpos.
startchar: expression opening character.
endchar: expression closing character.
Returns:
On finding matching endchar: (index just after matching endchar, 0)
Otherwise: (-1, new depth at end of this line)
"""
for i in range(startpos, len(line)):
if line[i] == startchar:
depth += 1
elif line[i] == endchar:
depth -= 1
if depth == 0:
return (i + 1, 0)
return (-1, depth)
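# Illustrative examples for FindEndOfExpressionInLine:
#   FindEndOfExpressionInLine('foo(bar(1), 2) + x', 3, 0, '(', ')')
#     returns (14, 0): the index just past the ')' that matches the '(' at 3.
#   FindEndOfExpressionInLine('foo(bar,', 3, 0, '(', ')')
#     returns (-1, 1): no match on this line, one paren still open.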
def CloseExpression(clean_lines, linenum, pos):
"""If input points to ( or { or [ or <, finds the position that closes it.
If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
linenum/pos that correspond to the closing of the expression.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *past* the closing brace, or
(line, len(lines), -1) if we never find a close. Note we ignore
strings and comments when matching; and the line we return is the
'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
startchar = line[pos]
if startchar not in '({[<':
return (line, clean_lines.NumLines(), -1)
if startchar == '(': endchar = ')'
if startchar == '[': endchar = ']'
if startchar == '{': endchar = '}'
if startchar == '<': endchar = '>'
# Check first line
(end_pos, num_open) = FindEndOfExpressionInLine(
line, pos, 0, startchar, endchar)
if end_pos > -1:
return (line, linenum, end_pos)
# Continue scanning forward
while linenum < clean_lines.NumLines() - 1:
linenum += 1
line = clean_lines.elided[linenum]
(end_pos, num_open) = FindEndOfExpressionInLine(
line, 0, num_open, startchar, endchar)
if end_pos > -1:
return (line, linenum, end_pos)
# Did not find endchar before end of file, give up
return (line, clean_lines.NumLines(), -1)
def FindStartOfExpressionInLine(line, endpos, depth, startchar, endchar):
"""Find position at the matching startchar.
This is almost the reverse of FindEndOfExpressionInLine, but note
that the input position and returned position differs by 1.
Args:
line: a CleansedLines line.
endpos: start searching at this position.
depth: nesting level at endpos.
startchar: expression opening character.
endchar: expression closing character.
Returns:
On finding matching startchar: (index at matching startchar, 0)
Otherwise: (-1, new depth at beginning of this line)
"""
for i in range(endpos, -1, -1):
if line[i] == endchar:
depth += 1
elif line[i] == startchar:
depth -= 1
if depth == 0:
return (i, 0)
return (-1, depth)
def ReverseCloseExpression(clean_lines, linenum, pos):
"""If input points to ) or } or ] or >, finds the position that opens it.
If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
linenum/pos that correspond to the opening of the expression.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *at* the opening brace, or
(line, 0, -1) if we never find the matching opening brace. Note
we ignore strings and comments when matching; and the line we
return is the 'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
endchar = line[pos]
if endchar not in ')}]>':
return (line, 0, -1)
if endchar == ')': startchar = '('
if endchar == ']': startchar = '['
if endchar == '}': startchar = '{'
if endchar == '>': startchar = '<'
# Check last line
(start_pos, num_open) = FindStartOfExpressionInLine(
line, pos, 0, startchar, endchar)
if start_pos > -1:
return (line, linenum, start_pos)
# Continue scanning backward
while linenum > 0:
linenum -= 1
line = clean_lines.elided[linenum]
(start_pos, num_open) = FindStartOfExpressionInLine(
line, len(line) - 1, num_open, startchar, endchar)
if start_pos > -1:
return (line, linenum, start_pos)
# Did not find startchar before beginning of file, give up
return (line, 0, -1)
def CheckForCopyright(filename, lines, error):
"""Logs an error if a Copyright message appears at the top of the file."""
# We'll check up to line 10. Don't forget there's a
# dummy line at the front.
for line in range(1, min(len(lines), 11)):
    # Note: flags cannot be passed to a compiled pattern's search(), so the
    # pattern itself must already be case-insensitive if that is intended.
    if _RE_COPYRIGHT.search(lines[line]):
error(filename, 0, 'legal/copyright', 5,
'Copyright message found. '
'You should not include a copyright line.')
def GetHeaderGuardCPPVariable(filename):
"""Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file.
"""
# Restores original filename in case that cpplint is invoked from Emacs's
# flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
fileinfo = FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
if _root:
file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
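# Illustrative example (assuming _root is unset and RepositoryName() returns
# the path unchanged): GetHeaderGuardCPPVariable('chrome/browser/ui/browser.h')
# yields 'CHROME_BROWSER_UI_BROWSER_H_'.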
def CheckForHeaderGuard(filename, lines, error):
"""Checks that the file contains a header guard.
  Logs an error if no #ifndef header guard is present. If a guard is
  present, checks that it is based on the full pathname of the file.
Args:
filename: The name of the C++ header file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
cppvar = GetHeaderGuardCPPVariable(filename)
ifndef = None
ifndef_linenum = 0
define = None
endif = None
endif_linenum = 0
for linenum, line in enumerate(lines):
linesplit = line.split()
if len(linesplit) >= 2:
# find the first occurrence of #ifndef and #define, save arg
if not ifndef and linesplit[0] == '#ifndef':
# set ifndef to the header guard presented on the #ifndef line.
ifndef = linesplit[1]
ifndef_linenum = linenum
if not define and linesplit[0] == '#define':
define = linesplit[1]
# find the last occurrence of #endif, save entire line
if line.startswith('#endif'):
endif = line
endif_linenum = linenum
if not ifndef:
error(filename, 0, 'build/header_guard', 5,
'No #ifndef header guard found, suggested CPP variable is: %s' %
cppvar)
return
if not define:
error(filename, 0, 'build/header_guard', 5,
'No #define header guard found, suggested CPP variable is: %s' %
cppvar)
return
# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
# for backward compatibility.
if ifndef != cppvar:
error_level = 0
if ifndef != cppvar + '_':
error_level = 5
ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
error)
error(filename, ifndef_linenum, 'build/header_guard', error_level,
'#ifndef header guard has wrong style, please use: %s' % cppvar)
if define != ifndef:
error(filename, 0, 'build/header_guard', 5,
'#ifndef and #define don\'t match, suggested CPP variable is: %s' %
cppvar)
return
if endif != ('#endif // %s' % cppvar):
error_level = 0
if endif != ('#endif // %s' % (cppvar + '_')):
error_level = 5
ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
error)
error(filename, endif_linenum, 'build/header_guard', error_level,
'#endif line should be "#endif // %s"' % cppvar)
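# For reference, for a header whose guard variable is FOO_BAR_H_ the check
# above expects roughly this layout (the exact #endif comment is compared
# against the message shown above):
#   #ifndef FOO_BAR_H_
#   #define FOO_BAR_H_
#   ...
#   #endif  // FOO_BAR_H_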
def CheckForBadCharacters(filename, lines, error):
"""Logs an error for each line containing bad characters.
Two kinds of bad characters:
1. Unicode replacement characters: These indicate that either the file
contained invalid UTF-8 (likely) or Unicode replacement characters (which
it shouldn't). Note that it's possible for this to throw off line
numbering if the invalid UTF-8 occurred adjacent to a newline.
2. NUL bytes. These are problematic for some tools.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
for linenum, line in enumerate(lines):
if u'\ufffd' in line:
error(filename, linenum, 'readability/utf8', 5,
'Line contains invalid UTF-8 (or Unicode replacement character).')
if '\0' in line:
error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
def CheckForNewlineAtEOF(filename, lines, error):
"""Logs an error if there is no newline char at the end of the file.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
# The array lines() was created by adding two newlines to the
# original file (go figure), then splitting on \n.
# To verify that the file ends in \n, we just have to make sure the
# last-but-two element of lines() exists and is empty.
if len(lines) < 3 or lines[-2]:
error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
"""Logs an error if we see /* ... */ or "..." that extend past one line.
/* ... */ comments are legit inside macros, for one line.
Otherwise, we prefer // comments, so it's ok to warn about the
other. Likewise, it's ok for strings to extend across multiple
lines, as long as a line continuation character (backslash)
terminates each line. Although not currently prohibited by the C++
style guide, it's ugly and unnecessary. We don't do well with either
in this lint program, so we warn about both.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remove all \\ (escaped backslashes) from the line. They are OK, and the
# second (escaped) slash may trigger later \" detection erroneously.
line = line.replace('\\\\', '')
if line.count('/*') > line.count('*/'):
error(filename, linenum, 'readability/multiline_comment', 5,
'Complex multi-line /*...*/-style comment found. '
'Lint may give bogus warnings. '
'Consider replacing these with //-style comments, '
'with #if 0...#endif, '
'or with more clearly structured multi-line comments.')
if (line.count('"') - line.count('\\"')) % 2:
error(filename, linenum, 'readability/multiline_string', 5,
'Multi-line string ("...") found. This lint script doesn\'t '
'do well with such strings, and may give bogus warnings. '
'Use C++11 raw strings or concatenation instead.')
caffe_alt_function_list = (
('memset', ['caffe_set', 'caffe_memset']),
('cudaMemset', ['caffe_gpu_set', 'caffe_gpu_memset']),
('memcpy', ['caffe_copy']),
('cudaMemcpy', ['caffe_copy', 'caffe_gpu_memcpy']),
)
def CheckCaffeAlternatives(filename, clean_lines, linenum, error):
"""Checks for C(++) functions for which a Caffe substitute should be used.
For certain native C functions (memset, memcpy), there is a Caffe alternative
which should be used instead.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for function, alts in caffe_alt_function_list:
ix = line.find(function + '(')
if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
line[ix - 1] not in ('_', '.', '>'))):
disp_alts = ['%s(...)' % alt for alt in alts]
error(filename, linenum, 'caffe/alt_fn', 2,
'Use Caffe function %s instead of %s(...).' %
(' or '.join(disp_alts), function))
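# Illustrative examples for the caffe/alt_fn check above:
#   'memset(data, 0, count);'       -> flagged; suggests caffe_set(...) or
#                                      caffe_memset(...)
#   'caffe_memset(data, 0, count);' -> not flagged: the match is preceded by
#                                      '_', so it is not a bare C call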
def CheckCaffeDataLayerSetUp(filename, clean_lines, linenum, error):
"""Except the base classes, Caffe DataLayer should define DataLayerSetUp
instead of LayerSetUp.
The base DataLayers define common SetUp steps, the subclasses should
not override them.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
ix = line.find('DataLayer<Dtype>::LayerSetUp')
if ix >= 0 and (
line.find('void DataLayer<Dtype>::LayerSetUp') != -1 or
line.find('void ImageDataLayer<Dtype>::LayerSetUp') != -1 or
line.find('void MemoryDataLayer<Dtype>::LayerSetUp') != -1 or
line.find('void WindowDataLayer<Dtype>::LayerSetUp') != -1):
error(filename, linenum, 'caffe/data_layer_setup', 2,
'Except the base classes, Caffe DataLayer should define'
+ ' DataLayerSetUp instead of LayerSetUp. The base DataLayers'
+ ' define common SetUp steps, the subclasses should'
+ ' not override them.')
ix = line.find('DataLayer<Dtype>::DataLayerSetUp')
if ix >= 0 and (
line.find('void Base') == -1 and
line.find('void DataLayer<Dtype>::DataLayerSetUp') == -1 and
line.find('void ImageDataLayer<Dtype>::DataLayerSetUp') == -1 and
line.find('void MemoryDataLayer<Dtype>::DataLayerSetUp') == -1 and
line.find('void WindowDataLayer<Dtype>::DataLayerSetUp') == -1):
error(filename, linenum, 'caffe/data_layer_setup', 2,
'Except the base classes, Caffe DataLayer should define'
+ ' DataLayerSetUp instead of LayerSetUp. The base DataLayers'
+ ' define common SetUp steps, the subclasses should'
+ ' not override them.')
c_random_function_list = (
'rand(',
'rand_r(',
'random(',
)
def CheckCaffeRandom(filename, clean_lines, linenum, error):
"""Checks for calls to C random functions (rand, rand_r, random, ...).
Caffe code should (almost) always use the caffe_rng_* functions rather
than these, as the internal state of these C functions is independent of the
native Caffe RNG system which should produce deterministic results for a
fixed Caffe seed set using Caffe::set_random_seed(...).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for function in c_random_function_list:
ix = line.find(function)
# Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
line[ix - 1] not in ('_', '.', '>'))):
error(filename, linenum, 'caffe/random_fn', 2,
'Use caffe_rng_rand() (or other caffe_rng_* function) instead of '
+ function +
') to ensure results are deterministic for a fixed Caffe seed.')
threading_list = (
('asctime(', 'asctime_r('),
('ctime(', 'ctime_r('),
('getgrgid(', 'getgrgid_r('),
('getgrnam(', 'getgrnam_r('),
('getlogin(', 'getlogin_r('),
('getpwnam(', 'getpwnam_r('),
('getpwuid(', 'getpwuid_r('),
('gmtime(', 'gmtime_r('),
('localtime(', 'localtime_r('),
('strtok(', 'strtok_r('),
('ttyname(', 'ttyname_r('),
)
def CheckPosixThreading(filename, clean_lines, linenum, error):
"""Checks for calls to thread-unsafe functions.
Much code has been originally written without consideration of
multi-threading. Also, engineers are relying on their old experience;
they have learned posix before threading extensions were added. These
tests guide the engineers to use thread-safe functions (when using
posix directly).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for single_thread_function, multithread_safe_function in threading_list:
ix = line.find(single_thread_function)
# Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
line[ix - 1] not in ('_', '.', '>'))):
error(filename, linenum, 'runtime/threadsafe_fn', 2,
'Consider using ' + multithread_safe_function +
'...) instead of ' + single_thread_function +
'...) for improved thread safety.')
def CheckVlogArguments(filename, clean_lines, linenum, error):
"""Checks that VLOG() is only used for defining a logging level.
For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
VLOG(FATAL) are not.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
error(filename, linenum, 'runtime/vlog', 5,
'VLOG() should be used with numeric verbosity level. '
'Use LOG() if you want symbolic severity levels.')
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
r'^\s*\*\w+(\+\+|--);')
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
"""Checks for invalid increment *count++.
  For example, the following function:
void increment_counter(int* count) {
*count++;
}
  is invalid, because it effectively does count++, moving the pointer, and
  should be replaced with ++*count, (*count)++ or *count += 1.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if _RE_PATTERN_INVALID_INCREMENT.match(line):
error(filename, linenum, 'runtime/invalid_increment', 5,
'Changing pointer instead of value (or unused value of operator*).')
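# Illustrative examples for the runtime/invalid_increment check above:
#   '*count++;'    -> flagged: increments the pointer, not the pointed-to value
#   '(*count)++;'  -> not flagged
#   '*count += 1;' -> not flagged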
class _BlockInfo(object):
"""Stores information about a generic block of code."""
def __init__(self, seen_open_brace):
self.seen_open_brace = seen_open_brace
self.open_parentheses = 0
self.inline_asm = _NO_ASM
def CheckBegin(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text up to the opening brace.
This is mostly for checking the text after the class identifier
and the "{", usually where the base class is specified. For other
blocks, there isn't much to check, so we always pass.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text after the closing brace.
This is mostly used for checking end of namespace comments.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
class _ClassInfo(_BlockInfo):
"""Stores information about a class."""
def __init__(self, name, class_or_struct, clean_lines, linenum):
_BlockInfo.__init__(self, False)
self.name = name
self.starting_linenum = linenum
self.is_derived = False
if class_or_struct == 'struct':
self.access = 'public'
self.is_struct = True
else:
self.access = 'private'
self.is_struct = False
# Remember initial indentation level for this class. Using raw_lines here
# instead of elided to account for leading comments.
initial_indent = Match(r'^( *)\S', clean_lines.raw_lines[linenum])
if initial_indent:
self.class_indent = len(initial_indent.group(1))
else:
self.class_indent = 0
# Try to find the end of the class. This will be confused by things like:
# class A {
# } *x = { ...
#
# But it's still good enough for CheckSectionSpacing.
self.last_line = 0
depth = 0
for i in range(linenum, clean_lines.NumLines()):
line = clean_lines.elided[i]
depth += line.count('{') - line.count('}')
if not depth:
self.last_line = i
break
def CheckBegin(self, filename, clean_lines, linenum, error):
# Look for a bare ':'
if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
self.is_derived = True
def CheckEnd(self, filename, clean_lines, linenum, error):
# Check that closing brace is aligned with beginning of the class.
# Only do this if the closing brace is indented by only whitespaces.
# This means we will not check single-line class definitions.
indent = Match(r'^( *)\}', clean_lines.elided[linenum])
if indent and len(indent.group(1)) != self.class_indent:
if self.is_struct:
parent = 'struct ' + self.name
else:
parent = 'class ' + self.name
error(filename, linenum, 'whitespace/indent', 3,
'Closing brace should be aligned with beginning of %s' % parent)
class _NamespaceInfo(_BlockInfo):
"""Stores information about a namespace."""
def __init__(self, name, linenum):
_BlockInfo.__init__(self, False)
self.name = name or ''
self.starting_linenum = linenum
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Check end of namespace comments."""
line = clean_lines.raw_lines[linenum]
    # Check how many lines are enclosed in this namespace. Don't issue
# warning for missing namespace comments if there aren't enough
# lines. However, do apply checks if there is already an end of
# namespace comment and it's incorrect.
#
# TODO(unknown): We always want to check end of namespace comments
# if a namespace is large, but sometimes we also want to apply the
# check if a short namespace contained nontrivial things (something
# other than forward declarations). There is currently no logic on
# deciding what these nontrivial things are, so this check is
# triggered by namespace size only, which works most of the time.
if (linenum - self.starting_linenum < 10
and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
return
# Look for matching comment at end of namespace.
#
# Note that we accept C style "/* */" comments for terminating
    # namespaces, so that code that terminates namespaces inside
# preprocessor macros can be cpplint clean.
#
# We also accept stuff like "// end of namespace <name>." with the
# period at the end.
#
# Besides these, we don't accept anything else, otherwise we might
# get false negatives when existing comment is a substring of the
# expected namespace.
if self.name:
# Named namespace
if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
r'[\*/\.\\\s]*$'),
line):
error(filename, linenum, 'readability/namespace', 5,
'Namespace should be terminated with "// namespace %s"' %
self.name)
else:
# Anonymous namespace
if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
error(filename, linenum, 'readability/namespace', 5,
'Namespace should be terminated with "// namespace"')
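# Illustrative examples of namespace-ending comments accepted by the check
# above for 'namespace mynamespace { ... }':
#   }  // namespace mynamespace
#   }  /* namespace mynamespace */
# An anonymous namespace only needs '}  // namespace'. A bare '}' closing a
# namespace that spans roughly ten lines or more is flagged.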
class _PreprocessorInfo(object):
"""Stores checkpoints of nesting stacks when #if/#else is seen."""
def __init__(self, stack_before_if):
# The entire nesting stack before #if
self.stack_before_if = stack_before_if
# The entire nesting stack up to #else
self.stack_before_else = []
# Whether we have already seen #else or #elif
self.seen_else = False
class _NestingState(object):
"""Holds states related to parsing braces."""
def __init__(self):
# Stack for tracking all braces. An object is pushed whenever we
# see a "{", and popped when we see a "}". Only 3 types of
# objects are possible:
# - _ClassInfo: a class or struct.
# - _NamespaceInfo: a namespace.
# - _BlockInfo: some other type of block.
self.stack = []
# Stack of _PreprocessorInfo objects.
self.pp_stack = []
def SeenOpenBrace(self):
"""Check if we have seen the opening brace for the innermost block.
Returns:
True if we have seen the opening brace, False if the innermost
block is still expecting an opening brace.
"""
return (not self.stack) or self.stack[-1].seen_open_brace
def InNamespaceBody(self):
"""Check if we are currently one level inside a namespace body.
Returns:
True if top of the stack is a namespace block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
def UpdatePreprocessor(self, line):
"""Update preprocessor stack.
We need to handle preprocessors due to classes like this:
#ifdef SWIG
struct ResultDetailsPageElementExtensionPoint {
#else
struct ResultDetailsPageElementExtensionPoint : public Extension {
#endif
We make the following assumptions (good enough for most files):
- Preprocessor condition evaluates to true from #if up to first
#else/#elif/#endif.
- Preprocessor condition evaluates to false from #else/#elif up
to #endif. We still perform lint checks on these lines, but
these do not affect nesting stack.
Args:
line: current line to check.
"""
if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
# Beginning of #if block, save the nesting stack here. The saved
# stack will allow us to restore the parsing state in the #else case.
self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
elif Match(r'^\s*#\s*(else|elif)\b', line):
# Beginning of #else block
if self.pp_stack:
if not self.pp_stack[-1].seen_else:
# This is the first #else or #elif block. Remember the
# whole nesting stack up to this point. This is what we
# keep after the #endif.
self.pp_stack[-1].seen_else = True
self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
# Restore the stack to how it was before the #if
self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
else:
# TODO(unknown): unexpected #else, issue warning?
pass
elif Match(r'^\s*#\s*endif\b', line):
# End of #if or #else blocks.
if self.pp_stack:
# If we saw an #else, we will need to restore the nesting
# stack to its former state before the #else, otherwise we
# will just continue from where we left off.
if self.pp_stack[-1].seen_else:
# Here we can just use a shallow copy since we are the last
# reference to it.
self.stack = self.pp_stack[-1].stack_before_else
# Drop the corresponding #if
self.pp_stack.pop()
else:
# TODO(unknown): unexpected #endif, issue warning?
pass
def Update(self, filename, clean_lines, linenum, error):
"""Update nesting state with current line.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Update pp_stack first
self.UpdatePreprocessor(line)
# Count parentheses. This is to avoid adding struct arguments to
# the nesting stack.
if self.stack:
inner_block = self.stack[-1]
depth_change = line.count('(') - line.count(')')
inner_block.open_parentheses += depth_change
# Also check if we are starting or ending an inline assembly block.
if inner_block.inline_asm in (_NO_ASM, _END_ASM):
if (depth_change != 0 and
inner_block.open_parentheses == 1 and
_MATCH_ASM.match(line)):
# Enter assembly block
inner_block.inline_asm = _INSIDE_ASM
else:
# Not entering assembly block. If previous line was _END_ASM,
# we will now shift to _NO_ASM state.
inner_block.inline_asm = _NO_ASM
elif (inner_block.inline_asm == _INSIDE_ASM and
inner_block.open_parentheses == 0):
# Exit assembly block
inner_block.inline_asm = _END_ASM
# Consume namespace declaration at the beginning of the line. Do
# this in a loop so that we catch same line declarations like this:
# namespace proto2 { namespace bridge { class MessageSet; } }
while True:
# Match start of namespace. The "\b\s*" below catches namespace
      # declarations even if they aren't followed by whitespace, so
      # that we don't confuse our namespace checker. The
# missing spaces will be flagged by CheckSpacing.
namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
if not namespace_decl_match:
break
new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
self.stack.append(new_namespace)
line = namespace_decl_match.group(2)
if line.find('{') != -1:
new_namespace.seen_open_brace = True
line = line[line.find('{') + 1:]
# Look for a class declaration in whatever is left of the line
# after parsing namespaces. The regexp accounts for decorated classes
# such as in:
# class LOCKABLE API Object {
# };
#
# Templates with class arguments may confuse the parser, for example:
# template <class T
# class Comparator = less<T>,
# class Vector = vector<T> >
# class HeapQueue {
#
# Because this parser has no nesting state about templates, by the
# time it saw "class Comparator", it may think that it's a new class.
# Nested templates have a similar problem:
# template <
# typename ExportedType,
# typename TupleType,
# template <typename, typename> class ImplTemplate>
#
# To avoid these cases, we ignore classes that are followed by '=' or '>'
class_decl_match = Match(
r'\s*(template\s*<[\w\s<>,:]*>\s*)?'
r'(class|struct)\s+([A-Z_]+\s+)*(\w+(?:::\w+)*)'
r'(([^=>]|<[^<>]*>|<[^<>]*<[^<>]*>\s*>)*)$', line)
if (class_decl_match and
(not self.stack or self.stack[-1].open_parentheses == 0)):
self.stack.append(_ClassInfo(
class_decl_match.group(4), class_decl_match.group(2),
clean_lines, linenum))
line = class_decl_match.group(5)
# If we have not yet seen the opening brace for the innermost block,
# run checks here.
if not self.SeenOpenBrace():
self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
# Update access control if we are inside a class/struct
if self.stack and isinstance(self.stack[-1], _ClassInfo):
classinfo = self.stack[-1]
access_match = Match(
r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
r':(?:[^:]|$)',
line)
if access_match:
classinfo.access = access_match.group(2)
# Check that access keywords are indented +1 space. Skip this
# check if the keywords are not preceded by whitespaces.
indent = access_match.group(1)
if (len(indent) != classinfo.class_indent + 1 and
Match(r'^\s*$', indent)):
if classinfo.is_struct:
parent = 'struct ' + classinfo.name
else:
parent = 'class ' + classinfo.name
slots = ''
if access_match.group(3):
slots = access_match.group(3)
error(filename, linenum, 'whitespace/indent', 3,
'%s%s: should be indented +1 space inside %s' % (
access_match.group(2), slots, parent))
# Consume braces or semicolons from what's left of the line
while True:
# Match first brace, semicolon, or closed parenthesis.
matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
if not matched:
break
token = matched.group(1)
if token == '{':
        # If namespace or class hasn't seen an opening brace yet, mark
# namespace/class head as complete. Push a new block onto the
# stack otherwise.
if not self.SeenOpenBrace():
self.stack[-1].seen_open_brace = True
else:
self.stack.append(_BlockInfo(True))
if _MATCH_ASM.match(line):
self.stack[-1].inline_asm = _BLOCK_ASM
elif token == ';' or token == ')':
# If we haven't seen an opening brace yet, but we already saw
# a semicolon, this is probably a forward declaration. Pop
# the stack for these.
#
# Similarly, if we haven't seen an opening brace yet, but we
# already saw a closing parenthesis, then these are probably
# function arguments with extra "class" or "struct" keywords.
        # Also pop the stack for these.
if not self.SeenOpenBrace():
self.stack.pop()
else: # token == '}'
# Perform end of block checks and pop the stack.
if self.stack:
self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
self.stack.pop()
line = matched.group(2)
def InnermostClass(self):
"""Get class info on the top of the stack.
Returns:
A _ClassInfo object if we are inside a class, or None otherwise.
"""
for i in range(len(self.stack), 0, -1):
classinfo = self.stack[i - 1]
if isinstance(classinfo, _ClassInfo):
return classinfo
return None
def CheckCompletedBlocks(self, filename, error):
"""Checks that all classes and namespaces have been completely parsed.
Call this when all lines in a file have been processed.
Args:
filename: The name of the current file.
error: The function to call with any errors found.
"""
# Note: This test can result in false positives if #ifdef constructs
# get in the way of brace matching. See the testBuildClass test in
# cpplint_unittest.py for an example of this.
for obj in self.stack:
if isinstance(obj, _ClassInfo):
error(filename, obj.starting_linenum, 'build/class', 5,
'Failed to find complete declaration of class %s' %
obj.name)
elif isinstance(obj, _NamespaceInfo):
error(filename, obj.starting_linenum, 'build/namespaces', 5,
'Failed to find complete declaration of namespace %s' %
obj.name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
nesting_state, error):
r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if Search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if Search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage class (static, extern, typedef, etc) should be first.')
if Match(r'\s*#\s*endif\s*[^/\s]+', line):
error(filename, linenum, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(filename, linenum, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
line):
error(filename, linenum, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
# TODO(unknown): Could it be expanded safely to arbitrary references,
# without triggering too many false positives? The first
# attempt triggered 5 warnings for mostly benign code in the regtest, hence
# the restriction.
# Here's the original regexp, for the reference:
# type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
# r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
error(filename, linenum, 'runtime/member_string_references', 2,
'const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.')
# Everything else in this function operates on class declarations.
# Return early if the top of the nesting stack is not a class, or if
# the class head is not completed yet.
classinfo = nesting_state.InnermostClass()
if not classinfo or not classinfo.seen_open_brace:
return
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style.
args = Match(r'\s+(?:inline\s+)?%s\s*\(([^,()]+)\)'
% re.escape(base_classname),
line)
if (args and
args.group(1) != 'void' and
not Match(r'(const\s+)?%s(\s+const)?\s*(?:<\w+>\s*)?&'
% re.escape(base_classname), args.group(1).strip())):
error(filename, linenum, 'runtime/explicit', 5,
'Single-argument constructors should be marked explicit.')
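# Illustrative examples for the runtime/explicit check above, for lines inside
# 'class Foo { ... };':
#   '  Foo(int x);'            -> flagged: single-argument constructor without
#                                 explicit
#   '  explicit Foo(int x);'   -> not flagged: the pattern must match at the
#                                 start of the line, so 'explicit' prevents it
#   '  Foo(const Foo& other);' -> not flagged: copy constructors are excluded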
def CheckSpacingForFunctionCall(filename, line, linenum, error):
"""Checks for the correctness of various spacing around function calls.
Args:
filename: The name of the current file.
line: The text of the line to check.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Since function calls often occur inside if/for/while/switch
# expressions - which have their own, more liberal conventions - we
# first see if we should be looking inside such an expression for a
# function call, to which we can apply more strict standards.
fncall = line # if there's no control flow construct, look at whole line
for pattern in (r'\bif\s*\((.*)\)\s*{',
r'\bfor\s*\((.*)\)\s*{',
r'\bwhile\s*\((.*)\)\s*[{;]',
r'\bswitch\s*\((.*)\)\s*{'):
match = Search(pattern, line)
if match:
fncall = match.group(1) # look inside the parens for function calls
break
# Except in if/for/while/switch, there should never be space
# immediately inside parens (eg "f( 3, 4 )"). We make an exception
# for nested parens ( (a+b) + c ). Likewise, there should never be
# a space before a ( when it's a function argument. I assume it's a
# function argument when the char before the whitespace is legal in
# a function name (alnum + _) and we're not starting a macro. Also ignore
# pointers and references to arrays and functions coz they're too tricky:
# we use a very simple way to recognize these:
# " (something)(maybe-something)" or
# " (something)(maybe-something," or
# " (something)[something]"
# Note that we assume the contents of [] to be short enough that
# they'll never need to wrap.
if ( # Ignore control structures.
not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
fncall) and
# Ignore pointers/references to functions.
not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
# Ignore pointers/references to arrays.
not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
error(filename, linenum, 'whitespace/parens', 4,
'Extra space after ( in function call')
elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Extra space after (')
if (Search(r'\w\s+\(', fncall) and
not Search(r'#\s*define|typedef', fncall) and
not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall)):
error(filename, linenum, 'whitespace/parens', 4,
'Extra space before ( in function call')
# If the ) is followed only by a newline or a { + newline, assume it's
# part of a control statement (if/while/etc), and don't complain
if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
# If the closing parenthesis is preceded by only whitespaces,
# try to give a more descriptive error message.
if Search(r'^\s+\)', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Closing ) should be moved to the previous line')
else:
error(filename, linenum, 'whitespace/parens', 2,
'Extra space before )')
def IsBlankLine(line):
"""Returns true if the given line is blank.
We consider a line to be blank if the line is empty or consists of
only white spaces.
Args:
line: A line of a string.
Returns:
True, if the given line is blank.
"""
return not line or line.isspace()
def CheckForFunctionLengths(filename, clean_lines, linenum,
function_state, error):
"""Reports for long function bodies.
For an overview why this is done, see:
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Only checks unindented functions, so class members are unchecked.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Blank/comment lines are not counted so as to avoid encouraging the removal
of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[linenum]
raw = clean_lines.raw_lines
raw_line = raw[linenum]
joined_line = ''
starting_func = False
regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
match_result = Match(regexp, line)
if match_result:
# If the name is all caps and underscores, figure it's a macro and
# ignore it, unless it's TEST or TEST_F.
function_name = match_result.group(1).split()[-1]
if function_name == 'TEST' or function_name == 'TEST_F' or (
not Match(r'[A-Z_]+$', function_name)):
starting_func = True
if starting_func:
body_found = False
for start_linenum in range(linenum, clean_lines.NumLines()):
start_line = lines[start_linenum]
joined_line += ' ' + start_line.lstrip()
if Search(r'(;|})', start_line): # Declarations and trivial functions
body_found = True
break # ... ignore
elif Search(r'{', start_line):
body_found = True
function = Search(r'((\w|:)*)\(', line).group(1)
if Match(r'TEST', function): # Handle TEST... macros
parameter_regexp = Search(r'(\(.*\))', joined_line)
if parameter_regexp: # Ignore bad syntax
function += parameter_regexp.group(1)
else:
function += '()'
function_state.Begin(function)
break
if not body_found:
# No body for the function (or evidence of a non-function) was found.
error(filename, linenum, 'readability/fn_size', 5,
'Lint failed to find start of function body.')
elif Match(r'^\}\s*$', line): # function end
function_state.Check(error, filename, linenum)
function_state.End()
elif not Match(r'^\s*$', line):
function_state.Count() # Count non-blank/non-comment lines.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
def CheckComment(comment, filename, linenum, error):
"""Checks for common mistakes in TODO comments.
Args:
comment: The text of the comment from the line in question.
filename: The name of the current file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
match = _RE_PATTERN_TODO.match(comment)
if match:
# One whitespace is correct; zero whitespace is handled elsewhere.
leading_whitespace = match.group(1)
if len(leading_whitespace) > 1:
error(filename, linenum, 'whitespace/todo', 2,
'Too many spaces before TODO')
username = match.group(2)
if not username:
error(filename, linenum, 'readability/todo', 2,
'Missing username in TODO; it should look like '
'"// TODO(my_username): Stuff."')
middle_whitespace = match.group(3)
# Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
if middle_whitespace != ' ' and middle_whitespace != '':
error(filename, linenum, 'whitespace/todo', 2,
'TODO(my_username) should be followed by a space')
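# Illustrative examples for the TODO checks above:
#   '// TODO(jane): Refactor.' -> clean
#   '// TODO: Refactor.'       -> flagged: missing username
#   '//   TODO(jane): ...'     -> flagged: too many spaces before TODO
#   '// TODO(jane):Refactor.'  -> flagged: TODO(...) not followed by a space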
def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
"""Checks for improper use of DISALLOW* macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
r'DISALLOW_EVIL_CONSTRUCTORS|'
r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
if not matched:
return
if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
if nesting_state.stack[-1].access != 'private':
error(filename, linenum, 'readability/constructors', 3,
'%s must be in the private: section' % matched.group(1))
else:
# Found DISALLOW* macro outside a class declaration, or perhaps it
# was used inside a function when it should have been part of the
# class declaration. We could issue a warning here, but it
# probably resulted in a compiler error already.
pass
def FindNextMatchingAngleBracket(clean_lines, linenum, init_suffix):
"""Find the corresponding > to close a template.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: Current line number.
init_suffix: Remainder of the current line after the initial <.
Returns:
True if a matching bracket exists.
"""
line = init_suffix
nesting_stack = ['<']
while True:
# Find the next operator that can tell us whether < is used as an
# opening bracket or as a less-than operator. We only want to
# warn on the latter case.
#
# We could also check all other operators and terminate the search
# early, e.g. if we got something like this "a<b+c", the "<" is
# most likely a less-than operator, but then we will get false
# positives for default arguments and other template expressions.
match = Search(r'^[^<>(),;\[\]]*([<>(),;\[\]])(.*)$', line)
if match:
# Found an operator, update nesting stack
operator = match.group(1)
line = match.group(2)
if nesting_stack[-1] == '<':
# Expecting closing angle bracket
if operator in ('<', '(', '['):
nesting_stack.append(operator)
elif operator == '>':
nesting_stack.pop()
if not nesting_stack:
# Found matching angle bracket
return True
elif operator == ',':
# Got a comma after a bracket, this is most likely a template
# argument. We have not seen a closing angle bracket yet, but
# it's probably a few lines later if we look for it, so just
# return early here.
return True
else:
# Got some other operator.
return False
else:
# Expecting closing parenthesis or closing bracket
if operator in ('<', '(', '['):
nesting_stack.append(operator)
elif operator in (')', ']'):
# We don't bother checking for matching () or []. If we got
# something like (] or [), it would have been a syntax error.
nesting_stack.pop()
else:
# Scan the next line
linenum += 1
if linenum >= len(clean_lines.elided):
break
line = clean_lines.elided[linenum]
# Exhausted all remaining lines and still no matching angle bracket.
# Most likely the input was incomplete, otherwise we should have
# seen a semicolon and returned early.
return True
def FindPreviousMatchingAngleBracket(clean_lines, linenum, init_prefix):
"""Find the corresponding < that started a template.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: Current line number.
init_prefix: Part of the current line before the initial >.
Returns:
True if a matching bracket exists.
"""
line = init_prefix
nesting_stack = ['>']
while True:
# Find the previous operator
match = Search(r'^(.*)([<>(),;\[\]])[^<>(),;\[\]]*$', line)
if match:
# Found an operator, update nesting stack
operator = match.group(2)
line = match.group(1)
if nesting_stack[-1] == '>':
# Expecting opening angle bracket
if operator in ('>', ')', ']'):
nesting_stack.append(operator)
elif operator == '<':
nesting_stack.pop()
if not nesting_stack:
# Found matching angle bracket
return True
elif operator == ',':
# Got a comma before a bracket, this is most likely a
# template argument. The opening angle bracket is probably
# there if we look for it, so just return early here.
return True
else:
# Got some other operator.
return False
else:
# Expecting opening parenthesis or opening bracket
if operator in ('>', ')', ']'):
nesting_stack.append(operator)
elif operator in ('(', '['):
nesting_stack.pop()
else:
# Scan the previous line
linenum -= 1
if linenum < 0:
break
line = clean_lines.elided[linenum]
# Exhausted all earlier lines and still no matching angle bracket.
return False
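# A worked example for the angle bracket matchers above (illustrative only):
# for "set<int> values;" the text after the '<' is "int> values;"; the first
# operator found is '>', which pops the initial '<' and empties the nesting
# stack, so FindNextMatchingAngleBracket returns True (a template). For
# "if (a<b) return;" the text after the '<' is "b) return;"; the first
# operator is ')', which cannot close an angle bracket, so the function
# returns False and the '<' is treated as a less-than operator.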
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't add a blank line
after public/protected/private, don't have too many blank lines in a row.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
# raw strings.
raw = clean_lines.lines_without_raw_strings
line = raw[linenum]
# Before nixing comments, check if the line is blank for no good
# reason. This includes the first line after a block is opened, and
# blank lines at the end of a function (i.e., right before a line like '}').
#
# Skip all the blank line checks if we are immediately inside a
# namespace body. In other words, don't issue blank line warnings
# for this block:
# namespace {
#
# }
#
# A warning about missing end of namespace comments will be issued instead.
if IsBlankLine(line) and not nesting_state.InNamespaceBody():
elided = clean_lines.elided
prev_line = elided[linenum - 1]
prevbrace = prev_line.rfind('{')
# TODO(unknown): Don't complain if line before blank line, and line after,
# both start with alnums and are indented the same amount.
# This ignores whitespace at the start of a namespace block
# because those are not usually indented.
if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
# OK, we have a blank line at the start of a code block. Before we
# complain, we check if it is an exception to the rule: The previous
# non-empty line has the parameters of a function header that are indented
# 4 spaces (because they did not fit in an 80 column line when placed on
# the same line as the function name). We also check for the case where
# the previous line is indented 6 spaces, which may happen when the
# initializers of a constructor do not fit into an 80 column line.
exception = False
if Match(r' {6}\w', prev_line): # Initializer list?
# We are looking for the opening column of initializer list, which
# should be indented 4 spaces to cause 6 space indentation afterwards.
search_position = linenum-2
while (search_position >= 0
and Match(r' {6}\w', elided[search_position])):
search_position -= 1
exception = (search_position >= 0
and elided[search_position][:5] == ' :')
else:
# Search for the function arguments or an initializer list. We use a
# simple heuristic here: if the line is indented 4 spaces and we have a
# closing paren, without the opening paren, followed by an opening brace
# or colon (for initializer lists) we assume that it is the last line of
# a function header. If we have a colon indented 4 spaces, it is an
# initializer list.
exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
prev_line)
or Match(r' {4}:', prev_line))
if not exception:
error(filename, linenum, 'whitespace/blank_line', 2,
'Redundant blank line at the start of a code block '
'should be deleted.')
# Ignore blank lines at the end of a block in a long if-else
# chain, like this:
# if (condition1) {
# // Something followed by a blank line
#
# } else if (condition2) {
# // Something else
# }
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
if (next_line
and Match(r'\s*}', next_line)
and next_line.find('} else ') == -1):
error(filename, linenum, 'whitespace/blank_line', 3,
'Redundant blank line at the end of a code block '
'should be deleted.')
matched = Match(r'\s*(public|protected|private):', prev_line)
if matched:
error(filename, linenum, 'whitespace/blank_line', 3,
'Do not leave a blank line after "%s:"' % matched.group(1))
# Next, we complain if there's a comment too near the text
commentpos = line.find('//')
if commentpos != -1:
# Check if the // may be in quotes. If so, ignore it
# Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
if (line.count('"', 0, commentpos) -
line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes
# Allow one space for new scopes, two spaces otherwise:
if (not Match(r'^\s*{ //', line) and
((commentpos >= 1 and
line[commentpos-1] not in string.whitespace) or
(commentpos >= 2 and
line[commentpos-2] not in string.whitespace))):
error(filename, linenum, 'whitespace/comments', 2,
'At least two spaces is best between code and comments')
# There should always be a space between the // and the comment
commentend = commentpos + 2
if commentend < len(line) and not line[commentend] == ' ':
# but some lines are exceptions -- e.g. if they're big
# comment delimiters like:
# //----------------------------------------------------------
# or are an empty C++ style Doxygen comment, like:
# ///
# or C++ style Doxygen comments placed after the variable:
# ///< Header comment
# //!< Header comment
# or they begin with multiple slashes followed by a space:
# //////// Header comment
match = (Search(r'[=/-]{4,}\s*$', line[commentend:]) or
Search(r'^/$', line[commentend:]) or
Search(r'^!< ', line[commentend:]) or
Search(r'^/< ', line[commentend:]) or
Search(r'^/+ ', line[commentend:]))
if not match:
error(filename, linenum, 'whitespace/comments', 4,
'Should have a space between // and comment')
CheckComment(line[commentpos:], filename, linenum, error)
line = clean_lines.elided[linenum] # get rid of comments and strings
# Don't try to do spacing checks for operator methods
line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', r'operator\(', line)
# We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
# Otherwise not. Note we only check for non-spaces on *both* sides;
# sometimes people put non-spaces on one side when aligning ='s among
# many lines (not that this is behavior that I approve of...)
if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
error(filename, linenum, 'whitespace/operators', 4,
'Missing spaces around =')
# It's ok not to have spaces around binary operators like + - * /, but if
# there's too little whitespace, we get concerned. It's hard to tell,
# though, so we punt on this one for now. TODO.
# You should always have whitespace around binary operators.
#
# Check <= and >= first to avoid false positives with < and >, then
# check non-include lines for spacing around < and >.
match = Search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around %s' % match.group(1))
# We allow no-spaces around << when used like this: 10<<20, but
# not otherwise (particularly, not when used as streams)
# Also ignore using ns::operator<<;
match = Search(r'(operator|\S)(?:L|UL|ULL|l|ul|ull)?<<(\S)', line)
if (match and
not (match.group(1).isdigit() and match.group(2).isdigit()) and
not (match.group(1) == 'operator' and match.group(2) == ';')):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <<')
elif not Match(r'#.*include', line):
# Avoid false positives on ->
reduced_line = line.replace('->', '')
# Look for < that is not surrounded by spaces. This is only
# triggered if both sides are missing spaces, even though
# technically we should flag if at least one side is missing a
# space. This is done to avoid some false positives with shifts.
match = Search(r'[^\s<]<([^\s=<].*)', reduced_line)
if (match and
not FindNextMatchingAngleBracket(clean_lines, linenum, match.group(1))):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <')
# Look for > that is not surrounded by spaces. Similar to the
# above, we only trigger if both sides are missing spaces to avoid
# false positives with shifts.
match = Search(r'^(.*[^\s>])>[^\s=>]', reduced_line)
if (match and
not FindPreviousMatchingAngleBracket(clean_lines, linenum,
match.group(1))):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >')
# We allow no-spaces around >> for almost anything. This is because
# C++11 allows ">>" to close nested templates, which accounts for
# most cases when ">>" is not followed by a space.
#
# We still warn on ">>" followed by alpha character, because that is
# likely due to ">>" being used for right shifts, e.g.:
# value >> alpha
#
# When ">>" is used to close templates, the alphanumeric letter that
# follows would be part of an identifier, and there should still be
# a space separating the template type and the identifier.
# type<type<type>> alpha
match = Search(r'>>[a-zA-Z_]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >>')
# There shouldn't be space around unary operators
match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
if match:
error(filename, linenum, 'whitespace/operators', 4,
'Extra space for operator %s' % match.group(1))
# A pet peeve of mine: no spaces after an if, while, switch, or for
match = Search(r' (if\(|for\(|while\(|switch\()', line)
if match:
error(filename, linenum, 'whitespace/parens', 5,
'Missing space before ( in %s' % match.group(1))
# For if/for/while/switch, the left and right parens should be
# consistent about how many spaces are inside the parens, and
# there should either be zero or one spaces inside the parens.
# We don't want: "if ( foo)" or "if ( foo )".
# Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
match = Search(r'\b(if|for|while|switch)\s*'
r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
line)
if match:
if len(match.group(2)) != len(match.group(4)):
if not (match.group(3) == ';' and
len(match.group(2)) == 1 + len(match.group(4)) or
not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
error(filename, linenum, 'whitespace/parens', 5,
'Mismatching spaces inside () in %s' % match.group(1))
if len(match.group(2)) not in [0, 1]:
error(filename, linenum, 'whitespace/parens', 5,
'Should have zero or one spaces inside ( and ) in %s' %
match.group(1))
# You should always have a space after a comma (either as fn arg or operator)
#
# This does not apply when the non-space character following the
# comma is another comma, since the only time when that happens is
# for empty macro arguments.
#
# We run this check in two passes: first pass on elided lines to
# verify that lines contain missing whitespace, second pass on raw
# lines to confirm that the missing whitespace is not due to
# elided comments.
if Search(r',[^,\s]', line) and Search(r',[^,\s]', raw[linenum]):
error(filename, linenum, 'whitespace/comma', 3,
'Missing space after ,')
# You should always have a space after a semicolon
# except for few corner cases
# TODO(unknown): clarify if 'if (1) { return 1;}' requires one more
# space after ;
if Search(r';[^\s};\\)/]', line):
error(filename, linenum, 'whitespace/semicolon', 3,
'Missing space after ;')
# Next we will look for issues with function calls.
CheckSpacingForFunctionCall(filename, line, linenum, error)
# Except after an opening paren, or after another opening brace (in case of
# an initializer list, for instance), you should have spaces before your
# braces. And since you should never have braces at the beginning of a line,
# this is an easy test.
match = Match(r'^(.*[^ ({]){', line)
if match:
# Try a bit harder to check for brace initialization. This
# happens in one of the following forms:
# Constructor() : initializer_list_{} { ... }
# Constructor{}.MemberFunction()
# Type variable{};
# FunctionCall(type{}, ...);
# LastArgument(..., type{});
# LOG(INFO) << type{} << " ...";
# map_of_type[{...}] = ...;
#
# We check for the character following the closing brace, and
# silence the warning if it's one of those listed above, i.e.
# "{.;,)<]".
#
# To account for nested initializer list, we allow any number of
# closing braces up to "{;,)<". We can't simply silence the
# warning on first sight of closing brace, because that would
# cause false negatives for things that are not initializer lists.
# Silence this: But not this:
# Outer{ if (...) {
# Inner{...} if (...){ // Missing space before {
# }; }
#
# There is a false negative with this approach if people inserted
# spurious semicolons, e.g. "if (cond){};", but we will catch the
# spurious semicolon with a separate check.
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
trailing_text = ''
if endpos > -1:
trailing_text = endline[endpos:]
for offset in range(endlinenum + 1,
min(endlinenum + 3, clean_lines.NumLines() - 1)):
trailing_text += clean_lines.elided[offset]
if not Match(r'^[\s}]*[{.;,)<\]]', trailing_text):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before {')
# Make sure '} else {' has spaces.
if Search(r'}else', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before else')
# You shouldn't have spaces before your brackets, except maybe after
# 'delete []' or 'new char * []'.
if Search(r'\w\s+\[', line) and not Search(r'delete\s+\[', line):
error(filename, linenum, 'whitespace/braces', 5,
'Extra space before [')
# You shouldn't have a space before a semicolon at the end of the line.
# There's a special case for "for" since the style guide allows space before
# the semicolon there.
if Search(r':\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use {} instead.')
elif Search(r'^\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
'use {} instead.')
elif (Search(r'\s+;\s*$', line) and
not Search(r'\bfor\b', line)):
error(filename, linenum, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
'statement, use {} instead.')
# In range-based for, we wanted spaces before and after the colon, but
# not around "::" tokens that might appear.
if (Search(r'for *\(.*[^:]:[^: ]', line) or
Search(r'for *\(.*[^: ]:[^:]', line)):
error(filename, linenum, 'whitespace/forcolon', 2,
'Missing space around colon in range-based for loop')
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
"""Checks for additional blank line issues related to sections.
Currently the only thing checked here is blank line before protected/private.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
class_info: A _ClassInfo objects.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Skip checks if the class is small, where small means 25 lines or less.
# 25 lines seems like a good cutoff since that's the usual height of
# terminals, and any class that can't fit in one screen can't really
# be considered "small".
#
# Also skip checks if we are on the first line. This accounts for
# classes that look like
# class Foo { public: ... };
#
# If we didn't find the end of the class, last_line would be zero,
# and the check will be skipped by the first condition.
if (class_info.last_line - class_info.starting_linenum <= 24 or
linenum <= class_info.starting_linenum):
return
matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
if matched:
# Issue warning if the line before public/protected/private was
# not a blank line, but don't do this if the previous line contains
# "class" or "struct". This can happen two ways:
# - We are at the beginning of the class.
# - We are forward-declaring an inner class that is semantically
# private, but needed to be public for implementation reasons.
# Also ignores cases where the previous line ends with a backslash as can be
# common when defining classes in C macros.
prev_line = clean_lines.lines[linenum - 1]
if (not IsBlankLine(prev_line) and
not Search(r'\b(class|struct)\b', prev_line) and
not Search(r'\\$', prev_line)):
# Try a bit harder to find the beginning of the class. This is to
# account for multi-line base-specifier lists, e.g.:
# class Derived
# : public Base {
end_class_head = class_info.starting_linenum
for i in range(class_info.starting_linenum, linenum):
if Search(r'\{\s*$', clean_lines.lines[i]):
end_class_head = i
break
if end_class_head < linenum - 1:
error(filename, linenum, 'whitespace/blank_line', 3,
'"%s:" should be preceded by a blank line' % matched.group(1))
def GetPreviousNonBlankLine(clean_lines, linenum):
"""Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
linenum: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line.
"""
prevlinenum = linenum - 1
while prevlinenum >= 0:
prevline = clean_lines.elided[prevlinenum]
if not IsBlankLine(prevline): # if not a blank line...
return (prevline, prevlinenum)
prevlinenum -= 1
return ('', -1)
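# Example behavior of GetPreviousNonBlankLine above (illustrative): if the
# elided lines 10-12 are "int x;", "", and "}" and linenum is 12, the function
# returns ('int x;', 10). Called with linenum 0 it returns ('', -1).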
def CheckBraces(filename, clean_lines, linenum, error):
"""Looks for misplaced braces (e.g. at the end of line).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
if Match(r'\s*{\s*$', line):
# We allow an open brace to start a line in the case where someone is using
# braces in a block to explicitly create a new scope, which is commonly used
# to control the lifetime of stack-allocated variables. Braces are also
# used for brace initializers inside function calls. We don't detect this
# perfectly: we just don't complain if the last non-whitespace character on
# the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
# previous line starts a preprocessor block.
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if (not Search(r'[,;:}{(]\s*$', prevline) and
not Match(r'\s*#', prevline)):
error(filename, linenum, 'whitespace/braces', 4,
'{ should almost always be at the end of the previous line')
# An else clause should be on the same line as the preceding closing brace.
if Match(r'\s*else\s*', line):
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if Match(r'\s*}\s*$', prevline):
error(filename, linenum, 'whitespace/newline', 4,
'An else should appear on the same line as the preceding }')
# If braces come on one side of an else, they should be on both.
# However, we have to worry about "else if" that spans multiple lines!
if Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
if Search(r'}\s*else if([^{]*)$', line): # could be multi-line if
# find the ( after the if
pos = line.find('else if')
pos = line.find('(', pos)
if pos > 0:
(endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
if endline[endpos:].find('{') == -1: # must be brace after if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
else: # common case: else not followed by a multi-line if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
# Likewise, an else should never have the else clause on the same line
if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
error(filename, linenum, 'whitespace/newline', 4,
'Else clause should never be on same line as else (use 2 lines)')
# In the same way, a do/while should never be on one line
if Match(r'\s*do [^\s{]', line):
error(filename, linenum, 'whitespace/newline', 4,
'do/while clauses should not be on a single line')
# Block bodies should not be followed by a semicolon. Due to C++11
# brace initialization, there are more places where semicolons are
# required than not, so we use a whitelist approach to check these
# rather than a blacklist. These are the places where "};" should
# be replaced by just "}":
# 1. Some flavor of block following closing parenthesis:
# for (;;) {};
# while (...) {};
# switch (...) {};
# Function(...) {};
# if (...) {};
# if (...) else if (...) {};
#
# 2. else block:
# if (...) else {};
#
# 3. const member function:
# Function(...) const {};
#
# 4. Block following some statement:
# x = 42;
# {};
#
# 5. Block at the beginning of a function:
# Function(...) {
# {};
# }
#
# Note that naively checking for the preceding "{" will also match
# braces inside multi-dimensional arrays, but this is fine since
# that expression will not contain semicolons.
#
# 6. Block following another block:
# while (true) {}
# {};
#
# 7. End of namespaces:
# namespace {};
#
# These semicolons seem far more common than other kinds of
# redundant semicolons, possibly due to people converting classes
# to namespaces. For now we do not warn for this case.
#
# Try matching case 1 first.
match = Match(r'^(.*\)\s*)\{', line)
if match:
# Matched closing parenthesis (case 1). Check the token before the
# matching opening parenthesis, and don't warn if it looks like a
# macro. This avoids these false positives:
# - macro that defines a base class
# - multi-line macro that defines a base class
# - macro that defines the whole class-head
#
# But we still issue warnings for macros that we know are safe to
# warn, specifically:
# - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
# - TYPED_TEST
# - INTERFACE_DEF
# - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
#
# We implement a whitelist of safe macros instead of a blacklist of
# unsafe macros, even though the latter appears less frequently in
# google code and would have been easier to implement. This is because
# the downside for getting the whitelist wrong means some extra
# semicolons, while the downside for getting the blacklist wrong
# would result in compile errors.
#
# In addition to macros, we also don't want to warn on compound
# literals.
closing_brace_pos = match.group(1).rfind(')')
opening_parenthesis = ReverseCloseExpression(
clean_lines, linenum, closing_brace_pos)
if opening_parenthesis[2] > -1:
line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
macro = Search(r'\b([A-Z_]+)\s*$', line_prefix)
if ((macro and
macro.group(1) not in (
'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
Search(r'\s+=\s*$', line_prefix)):
match = None
else:
# Try matching cases 2-3.
match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
if not match:
# Try matching cases 4-6. These are always matched on separate lines.
#
# Note that we can't simply concatenate the previous line to the
# current line and do a single match, otherwise we may output
# duplicate warnings for the blank line case:
# if (cond) {
# // blank line
# }
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if prevline and Search(r'[;{}]\s*$', prevline):
match = Match(r'^(\s*)\{', line)
# Check matching closing brace
if match:
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
# Current {} pair is eligible for semicolon check, and we have found
# the redundant semicolon, output warning here.
#
# Note: because we are scanning forward for opening braces, and
# outputting warnings for the matching closing brace, if there are
# nested blocks with trailing semicolons, we will get the error
# messages in reversed order.
error(filename, endlinenum, 'readability/braces', 4,
"You don't need a ; after a }")
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
"""Look for empty loop/conditional body with only a single semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Search for loop keywords at the beginning of the line. Because only
# whitespace is allowed before the keywords, this will also ignore most
# do-while-loops, since those lines should start with a closing brace.
#
# We also check "if" blocks here, since an empty conditional block
# is likely an error.
line = clean_lines.elided[linenum]
matched = Match(r'\s*(for|while|if)\s*\(', line)
if matched:
# Find the end of the conditional expression
(end_line, end_linenum, end_pos) = CloseExpression(
clean_lines, linenum, line.find('('))
# Output warning if what follows the condition expression is a semicolon.
# No warning for all other cases, including whitespace or newline, since we
# have a separate check for semicolons preceded by whitespace.
if end_pos >= 0 and Match(r';', end_line[end_pos:]):
if matched.group(1) == 'if':
error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
'Empty conditional bodies should use {}')
else:
error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
'Empty loop bodies should use {} or continue')
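# Illustrative cases for CheckEmptyBlockBody above (a sketch, not exhaustive):
#   while (Advance());   -> whitespace/empty_loop_body
#   if (done);           -> whitespace/empty_conditional_body
#   for (;;) {}          -> not flagged here; the body already uses {}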
def CheckCheck(filename, clean_lines, linenum, error):
"""Checks the use of CHECK and EXPECT macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Decide the set of replacement macros that should be suggested
lines = clean_lines.elided
check_macro = None
start_pos = -1
for macro in _CHECK_MACROS:
i = lines[linenum].find(macro)
if i >= 0:
check_macro = macro
# Find opening parenthesis. Do a regular expression match here
# to make sure that we are matching the expected CHECK macro, as
# opposed to some other macro that happens to contain the CHECK
# substring.
matched = Match(r'^(.*\b' + check_macro + r'\s*)\(', lines[linenum])
if not matched:
continue
start_pos = len(matched.group(1))
break
if not check_macro or start_pos < 0:
# Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT'
return
# Find end of the boolean expression by matching parentheses
(last_line, end_line, end_pos) = CloseExpression(
clean_lines, linenum, start_pos)
if end_pos < 0:
return
if linenum == end_line:
expression = lines[linenum][start_pos + 1:end_pos - 1]
else:
expression = lines[linenum][start_pos + 1:]
for i in range(linenum + 1, end_line):
expression += lines[i]
expression += last_line[0:end_pos - 1]
# Parse expression so that we can take parentheses into account.
# This avoids false positives for inputs like "CHECK((a < 4) == b)",
# which is not replaceable by CHECK_LE.
lhs = ''
rhs = ''
operator = None
while expression:
matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
r'==|!=|>=|>|<=|<|\()(.*)$', expression)
if matched:
token = matched.group(1)
if token == '(':
# Parenthesized operand
expression = matched.group(2)
(end, _) = FindEndOfExpressionInLine(expression, 0, 1, '(', ')')
if end < 0:
return # Unmatched parenthesis
lhs += '(' + expression[0:end]
expression = expression[end:]
elif token in ('&&', '||'):
# Logical and/or operators. This means the expression
# contains more than one term, for example:
# CHECK(42 < a && a < b);
#
# These are not replaceable with CHECK_LE, so bail out early.
return
elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
# Non-relational operator
lhs += token
expression = matched.group(2)
else:
# Relational operator
operator = token
rhs = matched.group(2)
break
else:
# Unparenthesized operand. Instead of appending to lhs one character
# at a time, we do another regular expression match to consume several
# characters at once if possible. Trivial benchmark shows that this
# is more efficient when the operands are longer than a single
# character, which is generally the case.
matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
if not matched:
matched = Match(r'^(\s*\S)(.*)$', expression)
if not matched:
break
lhs += matched.group(1)
expression = matched.group(2)
# Only apply checks if we got all parts of the boolean expression
if not (lhs and operator and rhs):
return
# Check that rhs does not contain logical operators. We already know
# that lhs is fine since the loop above parses out && and ||.
if rhs.find('&&') > -1 or rhs.find('||') > -1:
return
# At least one of the operands must be a constant literal. This is
# to avoid suggesting replacements for unprintable things like
# CHECK(variable != iterator)
#
# The following pattern matches decimal, hex integers, strings, and
# characters (in that order).
lhs = lhs.strip()
rhs = rhs.strip()
match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
if Match(match_constant, lhs) or Match(match_constant, rhs):
# Note: since we know both lhs and rhs, we can provide a more
# descriptive error message like:
# Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
# Instead of:
# Consider using CHECK_EQ instead of CHECK(a == b)
#
# We are still keeping the less descriptive message because if lhs
# or rhs gets long, the error message might become unreadable.
error(filename, linenum, 'readability/check', 2,
'Consider using %s instead of %s(a %s b)' % (
_CHECK_REPLACEMENT[check_macro][operator],
check_macro, operator))
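# Example of the suggestion produced by CheckCheck above, assuming the usual
# _CHECK_MACROS/_CHECK_REPLACEMENT tables defined elsewhere in this file
# (illustrative only):
#   CHECK(result == 42);     -> "Consider using CHECK_EQ instead of CHECK(a == b)"
#   CHECK(42 < a && a < b);  -> not flagged (more than one term)
#   CHECK(x == y);           -> not flagged (neither side is a literal constant)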
def CheckAltTokens(filename, clean_lines, linenum, error):
"""Check alternative keywords being used in boolean expressions.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Avoid preprocessor lines
if Match(r'^\s*#', line):
return
# Last ditch effort to avoid multi-line comments. This will not help
# if the comment started before the current line or ended after the
# current line, but it catches most of the false positives. At least,
# it provides a way to work around this warning for people who use
# multi-line comments in preprocessor macros.
#
# TODO(unknown): remove this once cpplint has better support for
# multi-line comments.
if line.find('/*') >= 0 or line.find('*/') >= 0:
return
for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
error(filename, linenum, 'readability/alt_tokens', 2,
'Use operator %s instead of %s' % (
_ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
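# Example of what CheckAltTokens above reports, assuming the usual
# _ALT_TOKEN_REPLACEMENT table defined elsewhere in this file (illustrative):
#   if (a and b)   -> "Use operator && instead of and"
#   if (not done)  -> "Use operator ! instead of not"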
def GetLineWidth(line):
"""Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters.
"""
if six.PY2:
if isinstance(line, unicode):
width = 0
for uc in unicodedata.normalize('NFC', line):
if unicodedata.east_asian_width(uc) in ('W', 'F'):
width += 2
elif not unicodedata.combining(uc):
width += 1
return width
return len(line)
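# Width examples for GetLineWidth above (the wide-character handling applies
# to Python 2 unicode strings only; otherwise the function falls back to len):
#   GetLineWidth('int x;') == 6
#   GetLineWidth(u'\u4e2d\u6587') == 4 under Python 2 (two wide CJK
#   characters count as two columns each), but == 2 via len() otherwise.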
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
# raw strings.
raw_lines = clean_lines.lines_without_raw_strings
line = raw_lines[linenum]
if line.find('\t') != -1:
error(filename, linenum, 'whitespace/tab', 1,
'Tab found; better to use spaces')
# One or three blank spaces at the beginning of the line is weird; it's
# hard to reconcile that with 2-space indents.
# NOTE: here are the conditions rob pike used for his tests. Mine aren't
# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
# if(RLENGTH > 20) complain = 0;
# if(match($0, " +(error|private|public|protected):")) complain = 0;
# if(match(prev, "&& *$")) complain = 0;
# if(match(prev, "\\|\\| *$")) complain = 0;
# if(match(prev, "[\",=><] *$")) complain = 0;
# if(match($0, " <<")) complain = 0;
# if(match(prev, " +for \\(")) complain = 0;
# if(prevodd && match(prevprev, " +for \\(")) complain = 0;
initial_spaces = 0
cleansed_line = clean_lines.elided[linenum]
while initial_spaces < len(line) and line[initial_spaces] == ' ':
initial_spaces += 1
if line and line[-1].isspace():
error(filename, linenum, 'whitespace/end_of_line', 4,
'Line ends in whitespace. Consider deleting these extra spaces.')
# There are certain situations we allow one space, notably for section labels
elif ((initial_spaces == 1 or initial_spaces == 3) and
not Match(r'\s*\w+\s*:\s*$', cleansed_line)):
error(filename, linenum, 'whitespace/indent', 3,
'Weird number of spaces at line-start. '
'Are you using a 2-space indent?')
# Check if the line is a header guard.
is_header_guard = False
if file_extension == 'h':
cppvar = GetHeaderGuardCPPVariable(filename)
if (line.startswith('#ifndef %s' % cppvar) or
line.startswith('#define %s' % cppvar) or
line.startswith('#endif // %s' % cppvar)):
is_header_guard = True
# #include lines and header guards can be long, since there's no clean way to
# split them.
#
# URLs can be long too. It's possible to split these, but it makes them
# harder to cut&paste.
#
# The "$Id:...$" comment may also get very long without it being the
# developer's fault.
if (not line.startswith('#include') and not is_header_guard and
not Match(r'^\s*//.*http(s?)://\S*$', line) and
not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
line_width = GetLineWidth(line)
extended_length = int((_line_length * 1.25))
if line_width > extended_length:
error(filename, linenum, 'whitespace/line_length', 4,
'Lines should very rarely be longer than %i characters' %
extended_length)
elif line_width > _line_length:
error(filename, linenum, 'whitespace/line_length', 2,
'Lines should be <= %i characters long' % _line_length)
if (cleansed_line.count(';') > 1 and
# for loops are allowed two ;'s (and may run over two lines).
cleansed_line.find('for') == -1 and
(GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
# It's ok to have many commands in a switch case that fits in 1 line
not ((cleansed_line.find('case ') != -1 or
cleansed_line.find('default:') != -1) and
cleansed_line.find('break;') != -1)):
error(filename, linenum, 'whitespace/newline', 0,
'More than one command on the same line')
# Some more style checks
CheckBraces(filename, clean_lines, linenum, error)
CheckEmptyBlockBody(filename, clean_lines, linenum, error)
CheckAccess(filename, clean_lines, linenum, nesting_state, error)
CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckCheck(filename, clean_lines, linenum, error)
CheckAltTokens(filename, clean_lines, linenum, error)
classinfo = nesting_state.InnermostClass()
if classinfo:
CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"')
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _IsTestFilename(filename):
"""Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise.
"""
if (filename.endswith('_test.cc') or
filename.endswith('_unittest.cc') or
filename.endswith('_regtest.cc')):
return True
else:
return False
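# E.g. _IsTestFilename('foo/bar_unittest.cc') is True, while
# _IsTestFilename('foo/bar.cc') and _IsTestFilename('foo/bartest.cc') are False.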
def _ClassifyInclude(fileinfo, include, is_system):
"""Figures out what kind of header 'include' is.
Args:
fileinfo: The current file cpplint is running over. A FileInfo instance.
include: The path to a #included file.
is_system: True if the #include used <> rather than "".
Returns:
One of the _XXX_HEADER constants.
For example:
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
_C_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
_CPP_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
_LIKELY_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
... 'bar/foo_other_ext.h', False)
_POSSIBLE_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
_OTHER_HEADER
"""
# This is a list of all standard c++ header files, except
# those already checked for above.
is_cpp_h = include in _CPP_HEADERS
if is_system:
if is_cpp_h:
return _CPP_SYS_HEADER
else:
return _C_SYS_HEADER
# If the target file and the include we're checking share a
# basename when we drop common extensions, and the include
# lives in '.', then it's likely to be owned by the target file.
target_dir, target_base = (
os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
if target_base == include_base and (
include_dir == target_dir or
include_dir == os.path.normpath(target_dir + '/../public')):
return _LIKELY_MY_HEADER
# If the target and include share some initial basename
# component, it's possible the target is implementing the
# include, so it's allowed to be first, but we'll never
# complain if it's not there.
target_first_component = _RE_FIRST_COMPONENT.match(target_base)
include_first_component = _RE_FIRST_COMPONENT.match(include_base)
if (target_first_component and include_first_component and
target_first_component.group(0) ==
include_first_component.group(0)):
return _POSSIBLE_MY_HEADER
return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
"""Check rules that are applicable to #include lines.
Strings on #include lines are NOT removed from elided line, to make
certain tasks easier. However, to prevent false positives, checks
applicable to #include lines in CheckLanguage must be put here.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
fileinfo = FileInfo(filename)
line = clean_lines.lines[linenum]
# "include" should use the new style "foo/bar.h" instead of just "bar.h"
if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line):
error(filename, linenum, 'build/include_dir', 4,
'Include the directory when naming .h files')
# we shouldn't include a file more than once. actually, there are a
# handful of instances where doing so is okay, but in general it's
# not.
match = _RE_PATTERN_INCLUDE.search(line)
if match:
include = match.group(2)
is_system = (match.group(1) == '<')
if include in include_state:
error(filename, linenum, 'build/include', 4,
'"%s" already included at %s:%s' %
(include, filename, include_state[include]))
else:
include_state[include] = linenum
# We want to ensure that headers appear in the right order:
# 1) for foo.cc, foo.h (preferred location)
# 2) c system files
# 3) cpp system files
# 4) for foo.cc, foo.h (deprecated location)
# 5) other google headers
#
# We classify each include statement as one of those 5 types
# using a number of techniques. The include_state object keeps
# track of the highest type seen, and complains if we see a
# lower type after that.
error_message = include_state.CheckNextIncludeOrder(
_ClassifyInclude(fileinfo, include, is_system))
if error_message:
error(filename, linenum, 'build/include_order', 4,
'%s. Should be: %s.h, c system, c++ system, other.' %
(error_message, fileinfo.BaseName()))
canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
if not include_state.IsInAlphabeticalOrder(
clean_lines, linenum, canonical_include):
error(filename, linenum, 'build/include_alpha', 4,
'Include "%s" not in alphabetical order' % include)
include_state.SetLastHeader(canonical_include)
# Look for any of the stream classes that are part of standard C++.
match = _RE_PATTERN_INCLUDE.match(line)
if match:
include = match.group(2)
if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
# Many unit tests use cout, so we exempt them.
if not _IsTestFilename(filename):
error(filename, linenum, 'readability/streams', 3,
'Streams are highly discouraged.')
def _GetTextInside(text, start_pattern):
r"""Retrieves all the text between matching open and close parentheses.
Given a string of lines and a regular expression string, retrieve all the text
following the expression and between opening punctuation symbols like
(, [, or {, and the matching close-punctuation symbol. This handles properly
nested occurrences of the punctuation, so for text like
printf(a(), b(c()));
a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
start_pattern must match a string that ends with an opening punctuation symbol.
Args:
text: The text to extract from. Its comments and strings must be elided.
It may be a single line or span multiple lines.
start_pattern: The regexp string indicating where to start extracting
the text.
Returns:
The extracted text.
None if either the opening string or ending punctuation could not be found.
"""
# TODO(sugawarayu): Audit cpplint.py to see what places could be profitably
# rewritten to use _GetTextInside (they currently use inferior regexp matching).
# Map each opening punctuation symbol to its matching close-punctuation.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
closing_punctuation = set(itervalues(matching_punctuation))
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
if not match: # start_pattern not found in text.
return None
start_position = match.end(0)
assert start_position > 0, (
'start_pattern must end with an opening punctuation.')
assert text[start_position - 1] in matching_punctuation, (
'start_pattern must end with an opening punctuation.')
# Stack of closing punctuations we expect to have in text after position.
punctuation_stack = [matching_punctuation[text[start_position - 1]]]
position = start_position
while punctuation_stack and position < len(text):
if text[position] == punctuation_stack[-1]:
punctuation_stack.pop()
elif text[position] in closing_punctuation:
# A closing punctuation without matching opening punctuations.
return None
elif text[position] in matching_punctuation:
punctuation_stack.append(matching_punctuation[text[position]])
position += 1
if punctuation_stack:
# Opening punctuations left without matching close-punctuations.
return None
# punctuations match.
return text[start_position:position - 1]
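# Additional usage sketch for _GetTextInside above (illustrative; mirrors the
# docstring example):
#   _GetTextInside('printf(a(), b(c()));', r'printf\(')  -> 'a(), b(c())'
#   _GetTextInside('foo(bar(1, 2);', r'foo\(')           -> None (unbalanced)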
# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
# < (?: < (?: < [^<>]*
# >
# | [^<>] )*
# >
# | [^<>] )*
# >
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]*
_RE_PATTERN_TYPE = (
r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
r'(?:\w|'
r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
r'::)+')
# A call-by-reference parameter ends with '& identifier'.
_RE_PATTERN_REF_PARAM = re.compile(
r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
_RE_PATTERN_CONST_REF_PARAM = (
r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
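# Illustrative matches for the parameter patterns above (a sketch):
#   _RE_PATTERN_REF_PARAM matches "string &value," and "vector<int> &out)",
#   while _RE_PATTERN_CONST_REF_PARAM matches "const string &name" but not
#   "string &name", so the non-const reference check further below can tell
#   const references (allowed) apart from mutable references (flagged).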
def CheckLanguage(filename, clean_lines, linenum, file_extension,
include_state, nesting_state, error):
"""Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# If the line is empty or consists of entirely a comment, no need to
# check it.
line = clean_lines.elided[linenum]
if not line:
return
match = _RE_PATTERN_INCLUDE.search(line)
if match:
CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
return
# Reset include state across preprocessor directives. This is meant
# to silence warnings for conditional includes.
if Match(r'^\s*#\s*(?:ifdef|elif|else|endif)\b', line):
include_state.ResetSection()
# Make Windows paths like Unix.
fullname = os.path.abspath(filename).replace('\\', '/')
# TODO(unknown): figure out if they're using default arguments in fn proto.
# Check to see if they're using a conversion function cast.
# I just try to capture the most common basic types, though there are more.
# Parameterless conversion functions, such as bool(), are allowed as they are
# probably a member operator declaration or default constructor.
match = Search(
r'(\bnew\s+)?\b' # Grab 'new' operator, if it's there
r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
r'(\([^)].*)', line)
if match:
matched_new = match.group(1)
matched_type = match.group(2)
matched_funcptr = match.group(3)
# gMock methods are defined using some variant of MOCK_METHODx(name, type)
# where type may be float(), int(string), etc. Without context they are
# virtually indistinguishable from int(x) casts. Likewise, gMock's
# MockCallback takes a template parameter of the form return_type(arg_type),
# which looks much like the cast we're trying to detect.
#
# std::function<> wrapper has a similar problem.
#
# Return types for function pointers also look like casts if they
# don't have an extra space.
if (matched_new is None and # If new operator, then this isn't a cast
not (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
Search(r'\bMockCallback<.*>', line) or
Search(r'\bstd::function<.*>', line)) and
not (matched_funcptr and
Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
matched_funcptr))):
# Try a bit harder to catch gmock lines: the only place where
# something looks like an old-style cast is where we declare the
# return type of the mocked method, and the only time when we
# are missing context is if MOCK_METHOD was split across
# multiple lines. The missing MOCK_METHOD is usually one or two
# lines back, so scan back one or two lines.
#
# It's not possible for gmock macros to appear in the first 2
# lines, since the class head + section name takes up 2 lines.
if (linenum < 2 or
not (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
clean_lines.elided[linenum - 1]) or
Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
clean_lines.elided[linenum - 2]))):
error(filename, linenum, 'readability/casting', 4,
'Using deprecated casting style. '
'Use static_cast<%s>(...) instead' %
matched_type)
CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'static_cast',
r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
# This doesn't catch all cases. Consider (const char * const)"hello".
#
# (char *) "foo" should always be a const_cast (reinterpret_cast won't
# compile).
if CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'const_cast', r'\((char\s?\*+\s?)\)\s*"', error):
pass
else:
# Check pointer casts for other than string constants
CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)
# In addition, we look for people taking the address of a cast. This
# is dangerous -- casts can assign to temporaries, so the pointer doesn't
# point where you think.
match = Search(
r'(?:&\(([^)]+)\)[\w(])|'
r'(?:&(static|dynamic|down|reinterpret)_cast\b)', line)
if match and match.group(1) != '*':
error(filename, linenum, 'runtime/casting', 4,
('Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'))
# Create an extended_line, which is the concatenation of the current and
# next lines, for more effective checking of code that may span more than one
# line.
if linenum + 1 < clean_lines.NumLines():
extended_line = line + clean_lines.elided[linenum + 1]
else:
extended_line = line
# Check for people declaring static/global STL strings at the top level.
# This is dangerous because the C++ language does not guarantee that
# globals with constructors are initialized before the first access.
match = Match(
r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
line)
# Make sure it's not a function.
# Function template specialization looks like: "string foo<Type>(...".
# Class template definitions look like: "string Foo<Type>::Method(...".
#
# Also ignore things that look like operators. These are matched separately
# because operator names cross non-word boundaries. If we change the pattern
# above, we would decrease the accuracy of matching identifiers.
if (match and
not Search(r'\boperator\W', line) and
not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)', match.group(3))):
error(filename, linenum, 'runtime/string', 4,
'For a static/global string constant, use a C style string instead: '
'"%schar %s[]".' %
(match.group(1), match.group(2)))
if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
error(filename, linenum, 'runtime/init', 4,
'You seem to be initializing a member variable with itself.')
if file_extension == 'h':
# TODO(unknown): check that 1-arg constructors are explicit.
# How to tell it's a constructor?
# (handled in CheckForNonStandardConstructs for now)
# TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS
# (level 1 error)
pass
# Check if people are using the verboten C basic types. The only exception
# we regularly allow is "unsigned short port" for port.
if Search(r'\bshort port\b', line):
if not Search(r'\bunsigned short port\b', line):
error(filename, linenum, 'runtime/int', 4,
'Use "unsigned short" for ports, not "short"')
else:
match = Search(r'\b(short|long(?! +double)|long long)\b', line)
if match:
error(filename, linenum, 'runtime/int', 4,
'Use int16/int64/etc, rather than the C type %s' % match.group(1))
# When snprintf is used, the second argument shouldn't be a literal.
match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
if match and match.group(2) != '0':
# If 2nd arg is zero, snprintf is used to calculate size.
error(filename, linenum, 'runtime/printf', 3,
'If you can, use sizeof(%s) instead of %s as the 2nd arg '
'to snprintf.' % (match.group(1), match.group(2)))
# Check if some verboten C functions are being used.
if Search(r'\bsprintf\b', line):
error(filename, linenum, 'runtime/printf', 5,
'Never use sprintf. Use snprintf instead.')
match = Search(r'\b(strcpy|strcat)\b', line)
if match:
error(filename, linenum, 'runtime/printf', 4,
'Almost always, snprintf is better than %s' % match.group(1))
# Check if some verboten operator overloading is going on
# TODO(unknown): catch out-of-line unary operator&:
# class X {};
# int operator&(const X& x) { return 42; } // unary operator&
# The trick is it's hard to tell apart from binary operator&:
# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
if Search(r'\boperator\s*&\s*\(\s*\)', line):
error(filename, linenum, 'runtime/operator', 4,
'Unary operator& is dangerous. Do not use it.')
# Check for suspicious usage of "if" like
# } if (a == b) {
if Search(r'\}\s*if\s*\(', line):
error(filename, linenum, 'readability/braces', 4,
'Did you mean "else if"? If not, start a new line for "if".')
# Check for potential format string bugs like printf(foo).
# We constrain the pattern not to pick things like DocidForPrintf(foo).
# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
# TODO(sugawarayu): Catch the following case. Need to change the calling
# convention of the whole function to process multiple line to handle it.
# printf(
# boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
if printf_args:
match = Match(r'([\w.\->()]+)$', printf_args)
if match and match.group(1) != '__VA_ARGS__':
function_name = re.search(r'\b((?:string)?printf)\s*\(',
line, re.I).group(1)
error(filename, linenum, 'runtime/printf', 4,
'Potential format string bug. Do %s("%%s", %s) instead.'
% (function_name, match.group(1)))
# Check for potential memset bugs like memset(buf, sizeof(buf), 0).
match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
error(filename, linenum, 'runtime/memset', 4,
'Did you mean "memset(%s, 0, %s)"?'
% (match.group(1), match.group(2)))
if Search(r'\busing namespace\b', line):
error(filename, linenum, 'build/namespaces', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
# Detect variable-length arrays.
match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
match.group(3).find(']') == -1):
# Split the size using space and arithmetic operators as delimiters.
# If any of the resulting tokens are not compile time constants then
# report the error.
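# Illustrative examples (placeholder names, not from the original comments):
#   char buf[user_count * 2];   -> 'user_count' matches no constant pattern,
#                                  so the declaration is reported below.
#   char buf[kMaxUsers * 2];    -> every token matches a constant pattern,
#                                  so the declaration passes.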
tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
is_const = True
skip_next = False
for tok in tokens:
if skip_next:
skip_next = False
continue
if Search(r'sizeof\(.+\)', tok): continue
if Search(r'arraysize\(\w+\)', tok): continue
tok = tok.lstrip('(')
tok = tok.rstrip(')')
if not tok: continue
if Match(r'\d+', tok): continue
if Match(r'0[xX][0-9a-fA-F]+', tok): continue
if Match(r'k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
# A catch all for tricky sizeof cases, including 'sizeof expression',
# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
# requires skipping the next token because we split on ' ' and '*'.
if tok.startswith('sizeof'):
skip_next = True
continue
is_const = False
break
if not is_const:
error(filename, linenum, 'runtime/arrays', 1,
'Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size.")
# If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or
# DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing
# in the class declaration.
match = Match(
(r'\s*'
r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
r'\(.*\);$'),
line)
if match and linenum + 1 < clean_lines.NumLines():
next_line = clean_lines.elided[linenum + 1]
# We allow some, but not all, declarations of variables to be present
# in the statement that defines the class. The [\w\*,\s]* fragment of
# the regular expression below allows users to declare instances of
# the class or pointers to instances, but not less common types such
# as function pointers or arrays. It's a tradeoff between allowing
# reasonable code and avoiding trying to parse more C++ using regexps.
if not Search(r'^\s*}[\w\*,\s]*;', next_line):
error(filename, linenum, 'readability/constructors', 3,
match.group(1) + ' should be the last thing in the class')
# Check for use of unnamed namespaces in header files. Registration
# macros are typically OK, so we allow use of "namespace {" on lines
# that end with backslashes.
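# Illustrative example (file and macro names are placeholders): in foo.h,
#   namespace { const int kLimit = 10; }
# would be reported, while a registration macro such as
#   #define REGISTER_FOO(name) namespace { Foo registrar_##name; } \
# is tolerated because the line ends with a backslash.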
if (file_extension == 'h'
and Search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
error(filename, linenum, 'build/namespaces', 4,
'Do not use unnamed namespaces in header files. See '
'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.')
def CheckForNonConstReference(filename, clean_lines, linenum,
nesting_state, error):
"""Check for non-const references.
Separate from CheckLanguage since it scans backwards from current
line, instead of scanning forward.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Do nothing if there is no '&' on current line.
line = clean_lines.elided[linenum]
if '&' not in line:
return
# Long type names may be broken across multiple lines, usually in one
# of these forms:
# LongType
# ::LongTypeContinued &identifier
# LongType::
# LongTypeContinued &identifier
# LongType<
# ...>::LongTypeContinued &identifier
#
# If we detected a type split across two lines, join the previous
# line to current line so that we can match const references
# accordingly.
#
# Note that this only scans back one line, since scanning back
# an arbitrary number of lines would be expensive. If you have a type
# that spans more than 2 lines, please use a typedef.
if linenum > 1:
previous = None
if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
# previous_line\n + ::current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
clean_lines.elided[linenum - 1])
elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
# previous_line::\n + current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
clean_lines.elided[linenum - 1])
if previous:
line = previous.group(1) + line.lstrip()
else:
# Check for templated parameter that is split across multiple lines
endpos = line.rfind('>')
if endpos > -1:
(_, startline, startpos) = ReverseCloseExpression(
clean_lines, linenum, endpos)
if startpos > -1 and startline < linenum:
# Found the matching < on an earlier line, collect all
# pieces up to current line.
line = ''
for i in range(startline, linenum + 1):
line += clean_lines.elided[i].strip()
# Check for non-const references in function parameters. A single '&' may
# be found in the following places:
# inside expression: binary & for bitwise AND
# inside expression: unary & for taking the address of something
# inside declarators: reference parameter
# We will exclude the first two cases by checking that we are not inside a
# function body, including one that was just introduced by a trailing '{'.
# TODO(unknown): Doesn't account for preprocessor directives.
# TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
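# Illustrative examples (placeholder signatures, not from the original
# comments): a parameter declared as
#   void Update(string& value);
# is reported by the check below, while
#   void Update(const string& value);   or   void Update(string* value);
# is not.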
check_params = False
if not nesting_state.stack:
check_params = True # top level
elif (isinstance(nesting_state.stack[-1], _ClassInfo) or
isinstance(nesting_state.stack[-1], _NamespaceInfo)):
check_params = True # within class or namespace
elif Match(r'.*{\s*$', line):
if (len(nesting_state.stack) == 1 or
isinstance(nesting_state.stack[-2], _ClassInfo) or
isinstance(nesting_state.stack[-2], _NamespaceInfo)):
check_params = True # just opened global/class/namespace block
# We allow non-const references in a few standard places, like functions
# called "swap()" or iostream operators like "<<" or ">>". Do not check
# those function parameters.
#
# We also accept & in static_assert, which looks like a function but
# it's actually a declaration expression.
whitelisted_functions = (r'(?:[sS]wap(?:<[\w:]+>)?|'
r'operator\s*[<>][<>]|'
r'static_assert|COMPILE_ASSERT'
r')\s*\(')
if Search(whitelisted_functions, line):
check_params = False
elif not Search(r'\S+\([^)]*$', line):
# Don't see a whitelisted function on this line. Actually we
# didn't see any function name on this line, so this is likely a
# multi-line parameter list. Try a bit harder to catch this case.
for i in range(2):
if (linenum > i and
Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
check_params = False
break
if check_params:
decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body
for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter):
error(filename, linenum, 'runtime/references', 2,
'Is this a non-const reference? '
'If so, make const or use a pointer: ' +
ReplaceAll(' *<', '<', parameter))
def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern,
error):
"""Checks for a C-style cast by looking for the pattern.
Args:
filename: The name of the current file.
linenum: The number of the line to check.
line: The line of code to check.
raw_line: The raw line of code to check, with comments.
cast_type: The string for the C++ cast to recommend. This is either
reinterpret_cast, static_cast, or const_cast, depending.
pattern: The regular expression used to find C-style casts.
error: The function to call with any errors found.
Returns:
True if an error was emitted.
False otherwise.
"""
match = Search(pattern, line)
if not match:
return False
# Exclude lines with sizeof, since sizeof looks like a cast.
sizeof_match = Match(r'.*sizeof\s*$', line[0:match.start(1) - 1])
if sizeof_match:
return False
# operator++(int) and operator--(int)
if (line[0:match.start(1) - 1].endswith(' operator++') or
line[0:match.start(1) - 1].endswith(' operator--')):
return False
# A single unnamed argument for a function tends to look like an
# old-style cast. If we see those, don't issue warnings for deprecated
# casts; instead, issue warnings for unnamed arguments where
# appropriate.
#
# These are things that we want warnings for, since the style guide
# explicitly requires all parameters to be named:
# Function(int);
# Function(int) {
# ConstMember(int) const;
# ConstMember(int) const {
# ExceptionMember(int) throw (...);
# ExceptionMember(int) throw (...) {
# PureVirtual(int) = 0;
#
# These are functions of some sort, where the compiler would be fine
# if they had named parameters, but people often omit those
# identifiers to reduce clutter:
# (FunctionPointer)(int);
# (FunctionPointer)(int) = value;
# Function((function_pointer_arg)(int))
# <TemplateArgument(int)>;
# <(FunctionPointerTemplateArgument)(int)>;
remainder = line[match.end(0):]
if Match(r'^\s*(?:;|const\b|throw\b|=|>|\{|\))', remainder):
# Looks like an unnamed parameter.
# Don't warn on any kind of template arguments.
if Match(r'^\s*>', remainder):
return False
# Don't warn on assignments to function pointers, but keep warnings for
# unnamed parameters to pure virtual functions. Note that this pattern
# will also pass on assignments of "0" to function pointers, but the
# preferred values for those would be "nullptr" or "NULL".
matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder)
if matched_zero and matched_zero.group(1) != '0':
    return False

#!/usr/bin/env python
#
# Copyright 2007-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Mercurial interface to codereview.appspot.com.
To configure, set the following options in
your repository's .hg/hgrc file.
[extensions]
codereview = path/to/codereview.py
[codereview]
server = codereview.appspot.com
The server should be running Rietveld; see http://code.google.com/p/rietveld/.
In addition to the new commands, this extension introduces
the file pattern syntax @nnnnnn, where nnnnnn is a change list
number, to mean the files included in that change list, which
must be associated with the current client.
For example, if change 123456 contains the files x.go and y.go,
"hg diff @123456" is equivalent to "hg diff x.go y.go".
'''
from mercurial import cmdutil, commands, hg, util, error, match
from mercurial.node import nullrev, hex, nullid, short
import os, re, time
import stat
import subprocess
import threading
from HTMLParser import HTMLParser
try:
from xml.etree import ElementTree as ET
except:
from elementtree import ElementTree as ET
try:
hgversion = util.version()
except:
from mercurial.version import version as v
hgversion = v.get_version()
oldMessage = """
The code review extension requires Mercurial 1.3 or newer.
To install a new Mercurial,
sudo easy_install mercurial
works on most systems.
"""
linuxMessage = """
You may need to clear your current Mercurial installation by running:
sudo apt-get remove mercurial mercurial-common
sudo rm -rf /etc/mercurial
"""
if hgversion < '1.3':
msg = oldMessage
if os.access("/etc/mercurial", 0):
msg += linuxMessage
raise util.Abort(msg)
def promptyesno(ui, msg):
# Arguments to ui.prompt changed between 1.3 and 1.3.1.
# Even so, some 1.3.1 distributions seem to have the old prompt!?!?
# What a terrible way to maintain software.
try:
return ui.promptchoice(msg, ["&yes", "&no"], 0) == 0
except AttributeError:
return ui.prompt(msg, ["&yes", "&no"], "y") != "n"
# To experiment with Mercurial in the python interpreter:
# >>> repo = hg.repository(ui.ui(), path = ".")
#######################################################################
# Normally I would split this into multiple files, but it simplifies
# import path headaches to keep it all in one file. Sorry.
import sys
if __name__ == "__main__":
print >>sys.stderr, "This is a Mercurial extension and should not be invoked directly."
sys.exit(2)
server = "codereview.appspot.com"
server_url_base = None
defaultcc = None
#######################################################################
# Change list parsing.
#
# Change lists are stored in .hg/codereview/cl.nnnnnn
# where nnnnnn is the number assigned by the code review server.
# Most data about a change list is stored on the code review server
# too: the description, reviewer, and cc list are all stored there.
# The only thing in the cl.nnnnnn file is the list of relevant files.
# Also, the existence of the cl.nnnnnn file marks this repository
# as the one where the change list lives.
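# For reference, a cl.nnnnnn file written by CL.DiskText() looks roughly
# like this (values are illustrative; indentation is a single tab):
#   Mailed: False
#   Description:
#           net: fix dial timeout
#   Files:
#           src/pkg/net/dial.go
#           src/pkg/net/dial_test.go
# An "Author:" line appears first only for change lists copied from another
# user (e.g. via hg clpatch).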
emptydiff = """Index: ~rietveld~placeholder~
===================================================================
diff --git a/~rietveld~placeholder~ b/~rietveld~placeholder~
new file mode 100644
"""
class CL(object):
def __init__(self, name):
self.name = name
self.desc = ''
self.files = []
self.reviewer = []
self.cc = []
self.url = ''
self.local = False
self.web = False
self.copied_from = None # None means current user
self.mailed = False
def DiskText(self):
cl = self
s = ""
if cl.copied_from:
s += "Author: " + cl.copied_from + "\n\n"
s += "Mailed: " + str(self.mailed) + "\n"
s += "Description:\n"
s += Indent(cl.desc, "\t")
s += "Files:\n"
for f in cl.files:
s += "\t" + f + "\n"
return s
def EditorText(self):
cl = self
s = _change_prolog
s += "\n"
if cl.copied_from:
s += "Author: " + cl.copied_from + "\n"
if cl.url != '':
s += 'URL: ' + cl.url + ' # cannot edit\n\n'
s += "Reviewer: " + JoinComma(cl.reviewer) + "\n"
s += "CC: " + JoinComma(cl.cc) + "\n"
s += "\n"
s += "Description:\n"
if cl.desc == '':
s += "\t<enter description here>\n"
else:
s += Indent(cl.desc, "\t")
s += "\n"
if cl.local or cl.name == "new":
s += "Files:\n"
for f in cl.files:
s += "\t" + f + "\n"
s += "\n"
return s
def PendingText(self):
cl = self
s = cl.name + ":" + "\n"
s += Indent(cl.desc, "\t")
s += "\n"
if cl.copied_from:
s += "\tAuthor: " + cl.copied_from + "\n"
s += "\tReviewer: " + JoinComma(cl.reviewer) + "\n"
s += "\tCC: " + JoinComma(cl.cc) + "\n"
s += "\tFiles:\n"
for f in cl.files:
s += "\t\t" + f + "\n"
return s
def Flush(self, ui, repo):
if self.name == "new":
self.Upload(ui, repo, gofmt_just_warn=True)
dir = CodeReviewDir(ui, repo)
path = dir + '/cl.' + self.name
f = open(path+'!', "w")
f.write(self.DiskText())
f.close()
if sys.platform == "win32" and os.path.isfile(path):
os.remove(path)
os.rename(path+'!', path)
if self.web and not self.copied_from:
EditDesc(self.name, desc=self.desc,
reviewers=JoinComma(self.reviewer), cc=JoinComma(self.cc))
def Delete(self, ui, repo):
dir = CodeReviewDir(ui, repo)
os.unlink(dir + "/cl." + self.name)
def Subject(self):
s = line1(self.desc)
if len(s) > 60:
s = s[0:55] + "..."
if self.name != "new":
s = "code review %s: %s" % (self.name, s)
return s
def Upload(self, ui, repo, send_mail=False, gofmt=True, gofmt_just_warn=False):
if not self.files:
ui.warn("no files in change list\n")
if ui.configbool("codereview", "force_gofmt", True) and gofmt:
CheckGofmt(ui, repo, self.files, just_warn=gofmt_just_warn)
os.chdir(repo.root)
form_fields = [
("content_upload", "1"),
("reviewers", JoinComma(self.reviewer)),
("cc", JoinComma(self.cc)),
("description", self.desc),
("base_hashes", ""),
# Would prefer not to change the subject
# on reupload, but /upload requires it.
("subject", self.Subject()),
]
# NOTE(rsc): This duplicates too much of RealMain,
# but RealMain doesn't have the most reusable interface.
if self.name != "new":
form_fields.append(("issue", self.name))
vcs = None
if self.files:
vcs = GuessVCS(upload_options)
data = vcs.GenerateDiff(self.files)
files = vcs.GetBaseFiles(data)
if len(data) > MAX_UPLOAD_SIZE:
uploaded_diff_file = []
form_fields.append(("separate_patches", "1"))
else:
uploaded_diff_file = [("data", "data.diff", data)]
else:
uploaded_diff_file = [("data", "data.diff", emptydiff)]
ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
response_body = MySend("/upload", body, content_type=ctype)
patchset = None
msg = response_body
lines = msg.splitlines()
if len(lines) >= 2:
msg = lines[0]
patchset = lines[1].strip()
patches = [x.split(" ", 1) for x in lines[2:]]
ui.status(msg + "\n")
if not response_body.startswith("Issue created.") and not response_body.startswith("Issue updated."):
raise util.Abort("failed to update issue: " + response_body)
issue = msg[msg.rfind("/")+1:]
self.name = issue
if not self.url:
self.url = server_url_base + self.name
if not uploaded_diff_file:
patches = UploadSeparatePatches(issue, rpc, patchset, data, upload_options)
if vcs:
vcs.UploadBaseFiles(issue, rpc, patches, patchset, upload_options, files)
if send_mail:
MySend("/" + issue + "/mail", payload="")
self.web = True
self.Flush(ui, repo)
return
def Mail(self, ui, repo):
pmsg = "Hello " + JoinComma(self.reviewer)
if self.cc:
pmsg += " (cc: %s)" % (', '.join(self.cc),)
pmsg += ",\n"
pmsg += "\n"
if not self.mailed:
pmsg += "I'd like you to review this change.\n"
else:
pmsg += "Please take another look.\n"
PostMessage(ui, self.name, pmsg, subject=self.Subject())
self.mailed = True
self.Flush(ui, repo)
def GoodCLName(name):
return re.match("^[0-9]+$", name)
def ParseCL(text, name):
sname = None
lineno = 0
sections = {
'Author': '',
'Description': '',
'Files': '',
'URL': '',
'Reviewer': '',
'CC': '',
'Mailed': '',
}
for line in text.split('\n'):
lineno += 1
line = line.rstrip()
if line != '' and line[0] == '#':
continue
if line == '' or line[0] == ' ' or line[0] == '\t':
if sname == None and line != '':
return None, lineno, 'text outside section'
if sname != None:
sections[sname] += line + '\n'
continue
p = line.find(':')
if p >= 0:
s, val = line[:p].strip(), line[p+1:].strip()
if s in sections:
sname = s
if val != '':
sections[sname] += val + '\n'
continue
return None, lineno, 'malformed section header'
for k in sections:
sections[k] = StripCommon(sections[k]).rstrip()
cl = CL(name)
if sections['Author']:
cl.copied_from = sections['Author']
cl.desc = sections['Description']
for line in sections['Files'].split('\n'):
i = line.find('#')
if i >= 0:
line = line[0:i].rstrip()
if line == '':
continue
cl.files.append(line)
cl.reviewer = SplitCommaSpace(sections['Reviewer'])
cl.cc = SplitCommaSpace(sections['CC'])
cl.url = sections['URL']
if sections['Mailed'] != 'False':
# Odd default, but avoids spurious mailings when
# reading old CLs that do not have a Mailed: line.
# CLs created with this update will always have
# Mailed: False on disk.
cl.mailed = True
if cl.desc == '<enter description here>':
cl.desc = ''
return cl, 0, ''
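# For reference, ParseCL accepts change list text of the following shape
# (values are illustrative; indented lines belong to the preceding section):
#   Reviewer: rsc
#   CC: golang-dev
#   Description:
#           runtime: fix typo in comment
#   Files:
#           src/pkg/runtime/proc.c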
def SplitCommaSpace(s):
s = s.strip()
if s == "":
return []
return re.split(", *", s)
def CutDomain(s):
i = s.find('@')
if i >= 0:
s = s[0:i]
return s
def JoinComma(l):
return ", ".join(l)
def ExceptionDetail():
s = str(sys.exc_info()[0])
if s.startswith("<type '") and s.endswith("'>"):
s = s[7:-2]
elif s.startswith("<class '") and s.endswith("'>"):
s = s[8:-2]
arg = str(sys.exc_info()[1])
if len(arg) > 0:
s += ": " + arg
return s
def IsLocalCL(ui, repo, name):
return GoodCLName(name) and os.access(CodeReviewDir(ui, repo) + "/cl." + name, 0)
# Load CL from disk and/or the web.
def LoadCL(ui, repo, name, web=True):
if not GoodCLName(name):
return None, "invalid CL name"
dir = CodeReviewDir(ui, repo)
path = dir + "cl." + name
if os.access(path, 0):
ff = open(path)
text = ff.read()
ff.close()
cl, lineno, err = ParseCL(text, name)
if err != "":
return None, "malformed CL data: "+err
cl.local = True
else:
cl = CL(name)
if web:
try:
f = GetSettings(name)
except:
return None, "cannot load CL %s from code review server: %s" % (name, ExceptionDetail())
if 'reviewers' not in f:
return None, "malformed response loading CL data from code review server"
cl.reviewer = SplitCommaSpace(f['reviewers'])
cl.cc = SplitCommaSpace(f['cc'])
if cl.local and cl.copied_from and cl.desc:
# local copy of CL written by someone else
# and we saved a description. use that one,
# so that committers can edit the description
# before doing hg submit.
pass
else:
cl.desc = f['description']
cl.url = server_url_base + name
cl.web = True
return cl, ''
class LoadCLThread(threading.Thread):
def __init__(self, ui, repo, dir, f, web):
threading.Thread.__init__(self)
self.ui = ui
self.repo = repo
self.dir = dir
self.f = f
self.web = web
self.cl = None
def run(self):
cl, err = LoadCL(self.ui, self.repo, self.f[3:], web=self.web)
if err != '':
self.ui.warn("loading "+self.dir+self.f+": " + err + "\n")
return
self.cl = cl
# Load all the CLs from this repository.
def LoadAllCL(ui, repo, web=True):
dir = CodeReviewDir(ui, repo)
m = {}
files = [f for f in os.listdir(dir) if f.startswith('cl.')]
if not files:
return m
active = []
first = True
for f in files:
t = LoadCLThread(ui, repo, dir, f, web)
t.start()
if web and first:
# first request: wait in case it needs to authenticate
# otherwise we get lots of user/password prompts
# running in parallel.
t.join()
if t.cl:
m[t.cl.name] = t.cl
first = False
else:
active.append(t)
for t in active:
t.join()
if t.cl:
m[t.cl.name] = t.cl
return m
# Find repository root. On error, ui.warn and return None
def RepoDir(ui, repo):
url = repo.url();
if not url.startswith('file:'):
ui.warn("repository %s is not in local file system\n" % (url,))
return None
url = url[5:]
if url.endswith('/'):
url = url[:-1]
return url
# Find (or make) code review directory. On error, ui.warn and return None
def CodeReviewDir(ui, repo):
dir = RepoDir(ui, repo)
if dir == None:
return None
dir += '/.hg/codereview/'
if not os.path.isdir(dir):
try:
os.mkdir(dir, 0700)
except:
ui.warn('cannot mkdir %s: %s\n' % (dir, ExceptionDetail()))
return None
return dir
# Strip maximal common leading white space prefix from text
def StripCommon(text):
ws = None
for line in text.split('\n'):
line = line.rstrip()
if line == '':
continue
white = line[:len(line)-len(line.lstrip())]
if ws == None:
ws = white
else:
common = ''
for i in range(min(len(white), len(ws))+1):
if white[0:i] == ws[0:i]:
common = white[0:i]
ws = common
if ws == '':
break
if ws == None:
return text
t = ''
for line in text.split('\n'):
line = line.rstrip()
if line.startswith(ws):
line = line[len(ws):]
if line == '' and t == '':
continue
t += line + '\n'
while len(t) >= 2 and t[-2:] == '\n\n':
t = t[:-1]
return t
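# Example of StripCommon (illustrative): the maximal common prefix "\t" is
# removed while deeper indentation is preserved:
#   StripCommon("\tabc\n\t\tdef\n")  ->  "abc\n\tdef\n"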
# Indent text with indent.
def Indent(text, indent):
t = ''
for line in text.split('\n'):
t += indent + line + '\n'
return t
# Return the first line of l
def line1(text):
return text.split('\n')[0]
_change_prolog = """# Change list.
# Lines beginning with # are ignored.
# Multi-line values should be indented.
"""
#######################################################################
# Mercurial helper functions
# Return list of changed files in repository that match pats.
def ChangedFiles(ui, repo, pats, opts):
# Find list of files being operated on.
matcher = cmdutil.match(repo, pats, opts)
node1, node2 = cmdutil.revpair(repo, None)
modified, added, removed = repo.status(node1, node2, matcher)[:3]
l = modified + added + removed
l.sort()
return l
# Return list of changed files in repository that match pats and still exist.
def ChangedExistingFiles(ui, repo, pats, opts):
matcher = cmdutil.match(repo, pats, opts)
node1, node2 = cmdutil.revpair(repo, None)
modified, added, _ = repo.status(node1, node2, matcher)[:3]
l = modified + added
l.sort()
return l
# Return list of files claimed by existing CLs
def TakenFiles(ui, repo):
return Taken(ui, repo).keys()
def Taken(ui, repo):
all = LoadAllCL(ui, repo, web=False)
taken = {}
for _, cl in all.items():
for f in cl.files:
taken[f] = cl
return taken
# Return list of changed files that are not claimed by other CLs
def DefaultFiles(ui, repo, pats, opts):
return Sub(ChangedFiles(ui, repo, pats, opts), TakenFiles(ui, repo))
def Sub(l1, l2):
return [l for l in l1 if l not in l2]
def Add(l1, l2):
l = l1 + Sub(l2, l1)
l.sort()
return l
def Intersect(l1, l2):
return [l for l in l1 if l in l2]
def getremote(ui, repo, opts):
# save $http_proxy; creating the HTTP repo object will
# delete it in an attempt to "help"
proxy = os.environ.get('http_proxy')
source = hg.parseurl(ui.expandpath("default"), None)[0]
other = hg.repository(cmdutil.remoteui(repo, opts), source)
if proxy is not None:
os.environ['http_proxy'] = proxy
return other
def Incoming(ui, repo, opts):
_, incoming, _ = repo.findcommonincoming(getremote(ui, repo, opts))
return incoming
def EditCL(ui, repo, cl):
s = cl.EditorText()
while True:
s = ui.edit(s, ui.username())
clx, line, err = ParseCL(s, cl.name)
if err != '':
if not promptyesno(ui, "error parsing change list: line %d: %s\nre-edit (y/n)?" % (line, err)):
return "change list not modified"
continue
cl.desc = clx.desc;
cl.reviewer = clx.reviewer
cl.cc = clx.cc
cl.files = clx.files
if cl.desc == '':
if promptyesno(ui, "change list should have description\nre-edit (y/n)?"):
continue
break
return ""
# For use by submit, etc. (NOT by change)
# Get change list number or list of files from command line.
# If files are given, make a new change list.
def CommandLineCL(ui, repo, pats, opts, defaultcc=None):
if len(pats) > 0 and GoodCLName(pats[0]):
if len(pats) != 1:
return None, "cannot specify change number and file names"
if opts.get('message'):
return None, "cannot use -m with existing CL"
cl, err = LoadCL(ui, repo, pats[0], web=True)
if err != "":
return None, err
else:
cl = CL("new")
cl.local = True
cl.files = Sub(ChangedFiles(ui, repo, pats, opts), TakenFiles(ui, repo))
if not cl.files:
return None, "no files changed"
if opts.get('reviewer'):
cl.reviewer = Add(cl.reviewer, SplitCommaSpace(opts.get('reviewer')))
if opts.get('cc'):
cl.cc = Add(cl.cc, SplitCommaSpace(opts.get('cc')))
if defaultcc:
cl.cc = Add(cl.cc, defaultcc)
if cl.name == "new":
if opts.get('message'):
cl.desc = opts.get('message')
else:
err = EditCL(ui, repo, cl)
if err != '':
return None, err
return cl, ""
# reposetup replaces cmdutil.match with this wrapper,
# which expands the syntax @clnumber to mean the files
# in that CL.
original_match = None
def ReplacementForCmdutilMatch(repo, pats=[], opts={}, globbed=False, default='relpath'):
taken = []
files = []
for p in pats:
if p.startswith('@'):
taken.append(p)
clname = p[1:]
if not GoodCLName(clname):
raise util.Abort("invalid CL name " + clname)
cl, err = LoadCL(repo.ui, repo, clname, web=False)
if err != '':
raise util.Abort("loading CL " + clname + ": " + err)
if cl.files == None:
raise util.Abort("no files in CL " + clname)
files = Add(files, cl.files)
pats = Sub(pats, taken) + ['path:'+f for f in files]
return original_match(repo, pats=pats, opts=opts, globbed=globbed, default=default)
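# Illustrative expansion (assuming CL 123456 lists x.go and y.go):
# ReplacementForCmdutilMatch turns the pattern list ['@123456'] into
# ['path:x.go', 'path:y.go'] before delegating to the original cmdutil.match.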
def RelativePath(path, cwd):
n = len(cwd)
if path.startswith(cwd) and path[n] == '/':
return path[n+1:]
return path
# Check that gofmt run on the list of files does not change them
def CheckGofmt(ui, repo, files, just_warn=False):
files = [f for f in files if (f.startswith('src/') or f.startswith('test/bench/')) and f.endswith('.go')]
if not files:
return
cwd = os.getcwd()
files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
files = [f for f in files if os.access(f, 0)]
try:
cmd = subprocess.Popen(["gofmt", "-l"] + files, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
cmd.stdin.close()
except:
raise util.Abort("gofmt: " + ExceptionDetail())
data = cmd.stdout.read()
errors = cmd.stderr.read()
cmd.wait()
if len(errors) > 0:
ui.warn("gofmt errors:\n" + errors.rstrip() + "\n")
return
if len(data) > 0:
msg = "gofmt needs to format these files (run hg gofmt):\n" + Indent(data, "\t").rstrip()
if just_warn:
ui.warn("warning: " + msg + "\n")
else:
raise util.Abort(msg)
return
#######################################################################
# Mercurial commands
# every command must take a ui and a repo as arguments.
# opts is a dict where you can find other command line flags
#
# Other parameters are taken in order from items on the command line that
# don't start with a dash. If no default value is given in the parameter list,
# they are required.
#
def change(ui, repo, *pats, **opts):
"""create or edit a change list
Create or edit a change list.
A change list is a group of files to be reviewed and submitted together,
plus a textual description of the change.
Change lists are referred to by simple alphanumeric names.
Changes must be reviewed before they can be submitted.
In the absence of options, the change command opens the
change list for editing in the default editor.
Deleting a change with the -d or -D flag does not affect
the contents of the files listed in that change. To revert
the files listed in a change, use
hg revert @123456
before running hg change -d 123456.
"""
dirty = {}
if len(pats) > 0 and GoodCLName(pats[0]):
name = pats[0]
if len(pats) != 1:
return "cannot specify CL name and file patterns"
pats = pats[1:]
cl, err = LoadCL(ui, repo, name, web=True)
if err != '':
return err
if not cl.local and (opts["stdin"] or not opts["stdout"]):
return "cannot change non-local CL " + name
else:
name = "new"
cl = CL("new")
dirty[cl] = True
files = ChangedFiles(ui, repo, pats, opts)
taken = TakenFiles(ui, repo)
files = Sub(files, taken)
if opts["delete"] or opts["deletelocal"]:
if opts["delete"] and opts["deletelocal"]:
return "cannot use -d and -D together"
flag = "-d"
if opts["deletelocal"]:
flag = "-D"
if name == "new":
return "cannot use "+flag+" with file patterns"
if opts["stdin"] or opts["stdout"]:
return "cannot use "+flag+" with -i or -o"
if not cl.local:
return "cannot change non-local CL " + name
if opts["delete"]:
if cl.copied_from:
return "original author must delete CL; hg change -D will remove locally"
PostMessage(ui, cl.name, "*** Abandoned ***")
EditDesc(cl.name, closed="checked")
cl.Delete(ui, repo)
return
if opts["stdin"]:
s = sys.stdin.read()
clx, line, err = ParseCL(s, name)
if err != '':
return "error parsing change list: line %d: %s" % (line, err)
if clx.desc is not None:
cl.desc = clx.desc;
dirty[cl] = True
if clx.reviewer is not None:
cl.reviewer = clx.reviewer
dirty[cl] = True
if clx.cc is not None:
cl.cc = clx.cc
dirty[cl] = True
if clx.files is not None:
cl.files = clx.files
dirty[cl] = True
if not opts["stdin"] and not opts["stdout"]:
if name == "new":
cl.files = files
err = EditCL(ui, repo, cl)
if err != "":
return err
dirty[cl] = True
for d, _ in dirty.items():
d.Flush(ui, repo)
if opts["stdout"]:
ui.write(cl.EditorText())
elif name == "new":
if ui.quiet:
ui.write(cl.name)
else:
ui.write("CL created: " + cl.url + "\n")
return
def code_login(ui, repo, **opts):
"""log in to code review server
Logs in to the code review server, saving a cookie in
a file in your home directory.
"""
MySend(None)
def clpatch(ui, repo, clname, **opts):
"""import a patch from the code review server
Imports a patch from the code review server into the local client.
If the local client has already modified any of the files that the
patch modifies, this command will refuse to apply the patch.
Submitting an imported patch will keep the original author's
name as the Author: line but add your own name to a Committer: line.
"""
cl, patch, err = DownloadCL(ui, repo, clname)
argv = ["hgpatch"]
if opts["fuzzy"]:
argv += ["--fuzzy"]
if opts["no_incoming"]:
argv += ["--checksync=false"]
if err != "":
return err
try:
cmd = subprocess.Popen(argv, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None, close_fds=True)
except:
return "hgpatch: " + ExceptionDetail()
if os.fork() == 0:
cmd.stdin.write(patch)
os._exit(0)
cmd.stdin.close()
out = cmd.stdout.read()
if cmd.wait() != 0 and not opts["ignore_hgpatch_failure"]:
return "hgpatch failed"
cl.local = True
cl.files = out.strip().split()
files = ChangedFiles(ui, repo, [], opts)
extra = Sub(cl.files, files)
if extra:
ui.warn("warning: these files were listed in the patch but not changed:\n\t" + "\n\t".join(extra) + "\n")
cl.Flush(ui, repo)
ui.write(cl.PendingText() + "\n")
def download(ui, repo, clname, **opts):
"""download a change from the code review server
Download prints a description of the given change list
followed by its diff, downloaded from the code review server.
"""
cl, patch, err = DownloadCL(ui, repo, clname)
if err != "":
return err
ui.write(cl.EditorText() + "\n")
ui.write(patch + "\n")
return
def file(ui, repo, clname, pat, *pats, **opts):
"""assign files to or remove files from a change list
Assign files to or (with -d) remove files from a change list.
The -d option only removes files from the change list.
It does not edit them or remove them from the repository.
"""
pats = tuple([pat] + list(pats))
if not GoodCLName(clname):
return "invalid CL name " + clname
dirty = {}
cl, err = LoadCL(ui, repo, clname, web=False)
if err != '':
return err
if not cl.local:
return "cannot change non-local CL " + clname
files = ChangedFiles(ui, repo, pats, opts)
if opts["delete"]:
oldfiles = Intersect(files, cl.files)
if oldfiles:
if not ui.quiet:
ui.status("# Removing files from CL. To undo:\n")
ui.status("# cd %s\n" % (repo.root))
for f in oldfiles:
ui.status("# hg file %s %s\n" % (cl.name, f))
cl.files = Sub(cl.files, oldfiles)
cl.Flush(ui, repo)
else:
ui.status("no such files in CL")
return
if not files:
return "no such modified files"
files = Sub(files, cl.files)
taken = Taken(ui, repo)
warned = False
for f in files:
if f in taken:
if not warned and not ui.quiet:
ui.status("# Taking files from other CLs. To undo:\n")
ui.status("# cd %s\n" % (repo.root))
warned = True
ocl = taken[f]
if not ui.quiet:
ui.status("# hg file %s %s\n" % (ocl.name, f))
if ocl not in dirty:
ocl.files = Sub(ocl.files, files)
dirty[ocl] = True
cl.files = Add(cl.files, files)
dirty[cl] = True
for d, _ in dirty.items():
d.Flush(ui, repo)
return
def gofmt(ui, repo, *pats, **opts):
"""apply gofmt to modified files
Applies gofmt to the modified files in the repository that match
the given patterns.
"""
files = ChangedExistingFiles(ui, repo, pats, opts)
files = [f for f in files if f.endswith(".go")]
if not files:
return "no modified go files"
cwd = os.getcwd()
files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
try:
cmd = ["gofmt", "-l"]
if not opts["list"]:
cmd += ["-w"]
if os.spawnvp(os.P_WAIT, "gofmt", cmd + files) != 0:
raise util.Abort("gofmt did not exit cleanly")
except error.Abort, e:
raise
except:
raise util.Abort("gofmt: " + ExceptionDetail())
return
def mail(ui, repo, *pats, **opts):
"""mail a change for review
Uploads a patch to the code review server and then sends mail
to the reviewer and CC list asking for a review.
"""
cl, err = CommandLineCL(ui, repo, pats, opts, defaultcc=defaultcc)
if err != "":
return err
cl.Upload(ui, repo, gofmt_just_warn=True)
if not cl.reviewer:
# If no reviewer is listed, assign the review to defaultcc.
# This makes sure that it appears in the
# codereview.appspot.com/user/defaultcc
# page, so that it doesn't get dropped on the floor.
if not defaultcc:
return "no reviewers listed in CL"
cl.cc = Sub(cl.cc, defaultcc)
cl.reviewer = defaultcc
cl.Flush(ui, repo)
cl.Mail(ui, repo)
def nocommit(ui, repo, *pats, **opts):
"""(disabled when using this extension)"""
return "The codereview extension is enabled; do not use commit."
def pending(ui, repo, *pats, **opts):
"""show pending changes
Lists pending changes followed by a list of unassigned but modified files.
"""
m = LoadAllCL(ui, repo, web=True)
names = m.keys()
names.sort()
for name in names:
cl = m[name]
ui.write(cl.PendingText() + "\n")
files = DefaultFiles(ui, repo, [], opts)
if len(files) > 0:
s = "Changed files not in any CL:\n"
for f in files:
s += "\t" + f + "\n"
ui.write(s)
def reposetup(ui, repo):
global original_match
if original_match is None:
original_match = cmdutil.match
cmdutil.match = ReplacementForCmdutilMatch
RietveldSetup(ui, repo)
def CheckContributor(ui, repo, user=None):
if not user:
user = ui.config("ui", "username")
if not user:
raise util.Abort("[ui] username is not configured in .hgrc")
_, userline = FindContributor(ui, repo, user, warn=False)
if not userline:
raise util.Abort("cannot find %s in CONTRIBUTORS" % (user,))
return userline
def FindContributor(ui, repo, user, warn=True):
try:
f = open(repo.root + '/CONTRIBUTORS', 'r')
except:
raise util.Abort("cannot open %s: %s" % (repo.root+'/CONTRIBUTORS', ExceptionDetail()))
for line in f.readlines():
line = line.rstrip()
if line.startswith('#'):
continue
match = re.match(r"(.*) <(.*)>", line)
if not match:
continue
if line == user or match.group(2).lower() == user.lower():
return match.group(2), line
if warn:
ui.warn("warning: cannot find %s in CONTRIBUTORS\n" % (user,))
return None, None
def submit(ui, repo, *pats, **opts):
"""submit change to remote repository
Submits change to remote repository.
Bails out if the local repository is not in sync with the remote one.
"""
repo.ui.quiet = True
if not opts["no_incoming"] and Incoming(ui, repo, opts):
return "local repository out of date; must sync before submit"
cl, err = CommandLineCL(ui, repo, pats, opts, defaultcc=defaultcc)
if err != "":
return err
user = None
if cl.copied_from:
user = cl.copied_from
userline = CheckContributor(ui, repo, user)
about = ""
if cl.reviewer:
about += "R=" + JoinComma([CutDomain(s) for s in cl.reviewer]) + "\n"
if opts.get('tbr'):
tbr = SplitCommaSpace(opts.get('tbr'))
cl.reviewer = Add(cl.reviewer, tbr)
about += "TBR=" + JoinComma([CutDomain(s) for s in tbr]) + "\n"
if cl.cc:
about += "CC=" + JoinComma([CutDomain(s) for s in cl.cc]) + "\n"
if not cl.reviewer:
return "no reviewers listed in CL"
if not cl.local:
return "cannot submit non-local CL"
# upload, to sync current patch and also get change number if CL is new.
if not cl.copied_from:
cl.Upload(ui, repo, gofmt_just_warn=True)
# check gofmt for real; allowed upload to warn in order to save CL.
cl.Flush(ui, repo)
CheckGofmt(ui, repo, cl.files)
about += "%s%s\n" % (server_url_base, cl.name)
if cl.copied_from:
about += "\nCommitter: " + CheckContributor(ui, repo, None) + "\n"
if not cl.mailed and not cl.copied_from: # in case this is TBR
cl.Mail(ui, repo)
# submit changes locally
date = opts.get('date')
if date:
opts['date'] = util.parsedate(date)
opts['message'] = cl.desc.rstrip() + "\n\n" + about
if opts['dryrun']:
print "NOT SUBMITTING:"
print "User: ", userline
print "Message:"
print Indent(opts['message'], "\t")
print "Files:"
print Indent('\n'.join(cl.files), "\t")
return "dry run; not submitted"
m = match.exact(repo.root, repo.getcwd(), cl.files)
node = repo.commit(opts['message'], userline, opts.get('date'), m)
if not node:
return "nothing changed"
# push to remote; if it fails for any reason, roll back
try:
log = repo.changelog
rev = log.rev(node)
parents = log.parentrevs(rev)
if (rev-1 not in parents and
(parents == (nullrev, nullrev) or
len(log.heads(log.node(parents[0]))) > 1 and
(parents[1] == nullrev or len(log.heads(log.node(parents[1]))) > 1))):
# created new head
raise util.Abort("local repository out of date; must sync before submit")
# push changes to remote.
# if it works, we're committed.
# if not, roll back
other = getremote(ui, repo, opts)
r = repo.push(other, False, None)
if r == 0:
raise util.Abort("local repository out of date; must sync before submit")
except:
repo.rollback()
raise
# we're committed. upload final patch, close review, add commit message
changeURL = short(node)
url = other.url()
m = re.match("^https?://([^@/]+@)?([^.]+)\.googlecode\.com/hg/", url)
if m:
changeURL = "http://code.google.com/p/%s/source/detail?r=%s" % (m.group(2), changeURL)
else:
print >>sys.stderr, "URL: ", url
pmsg = "*** Submitted as " + changeURL + " ***\n\n" + opts['message']
# When posting, move reviewers to CC line,
# so that the issue stops showing up in their "My Issues" page.
PostMessage(ui, cl.name, pmsg, reviewers="", cc=JoinComma(cl.reviewer+cl.cc))
if not cl.copied_from:
EditDesc(cl.name, closed="checked")
cl.Delete(ui, repo)
def sync(ui, repo, **opts):
"""synchronize with remote repository
Incorporates recent changes from the remote repository
into the local repository.
"""
if not opts["local"]:
ui.status = sync_note
ui.note = sync_note
other = getremote(ui, repo, opts)
modheads = repo.pull(other)
err = commands.postincoming(ui, repo, modheads, True, "tip")
if err:
return err
commands.update(ui, repo)
sync_changes(ui, repo)
def sync_note(msg):
# we run sync (pull -u) in verbose mode to get the
# list of files being updated, but that drags along
# a bunch of messages we don't care about.
# omit them.
if msg == 'resolving manifests\n':
return
if msg == 'searching for changes\n':
return
if msg == "couldn't find merge tool hgmerge\n":
return
sys.stdout.write(msg)
def sync_changes(ui, repo):
# Look through recent change log descriptions to find
# potential references to http://.*/our-CL-number.
# Double-check them by looking at the Rietveld log.
def Rev(rev):
desc = repo[rev].description().strip()
for clname in re.findall('(?m)^http://(?:[^\n]+)/([0-9]+)$', desc):
if IsLocalCL(ui, repo, clname) and IsRietveldSubmitted(ui, clname, repo[rev].hex()):
ui.warn("CL %s submitted as %s; closing\n" % (clname, repo[rev]))
cl, err = LoadCL(ui, repo, clname, web=False)
if err != "":
ui.warn("loading CL %s: %s\n" % (clname, err))
continue
if not cl.copied_from:
EditDesc(cl.name, closed="checked")
cl.Delete(ui, repo)
if hgversion < '1.4':
get = util.cachefunc(lambda r: repo[r].changeset())
changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, [], get, {'rev': None})
n = 0
for st, rev, fns in changeiter:
if st != 'iter':
continue
n += 1
if n > 100:
break
Rev(rev)
else:
matchfn = cmdutil.match(repo, [], {'rev': None})
def prep(ctx, fns):
pass
for ctx in cmdutil.walkchangerevs(repo, matchfn, {'rev': None}, prep):
Rev(ctx.rev())
# Remove files that are not modified from the CLs in which they appear.
all = LoadAllCL(ui, repo, web=False)
changed = ChangedFiles(ui, repo, [], {})
for _, cl in all.items():
extra = Sub(cl.files, changed)
if extra:
ui.warn("Removing unmodified files from CL %s:\n" % (cl.name,))
for f in extra:
ui.warn("\t%s\n" % (f,))
cl.files = Sub(cl.files, extra)
cl.Flush(ui, repo)
if not cl.files:
ui.warn("CL %s has no files; suggest hg change -d %s\n" % (cl.name, cl.name))
return
def uisetup(ui):
if "^commit|ci" in commands.table:
commands.table["^commit|ci"] = (nocommit, [], "")
def upload(ui, repo, name, **opts):
"""upload diffs to the code review server
Uploads the current modifications for a given change to the server.
"""
repo.ui.quiet = True
cl, err = LoadCL(ui, repo, name, web=True)
if err != "":
return err
if not cl.local:
return "cannot upload non-local change"
cl.Upload(ui, repo)
print "%s%s\n" % (server_url_base, cl.name)
return
review_opts = [
('r', 'reviewer', '', 'add reviewer'),
('', 'cc', '', 'add cc'),
('', 'tbr', '', 'add future reviewer'),
('m', 'message', '', 'change description (for new change)'),
]
cmdtable = {
# The ^ means to show this command in the help text that
# is printed when running hg with no arguments.
"^change": (
change,
[
('d', 'delete', None, 'delete existing change list'),
('D', 'deletelocal', None, 'delete locally, but do not change CL on server'),
('i', 'stdin', None, 'read change list from standard input'),
('o', 'stdout', None, 'print change list to standard output'),
],
"[-d | -D] [-i] [-o] change# or FILE ..."
),
"^clpatch": (
clpatch,
[
('', 'ignore_hgpatch_failure', None, 'create CL metadata even if hgpatch fails'),
('', 'no_incoming', None, 'disable check for incoming changes'),
('', 'fuzzy', None, 'attempt to adjust patch line numbers'),
],
"change#"
),
# Would prefer to call this codereview-login, but then
# hg help codereview prints the help for this command
# instead of the help for the extension.
"code-login": (
code_login,
[],
"",
),
"commit|ci": (
nocommit,
[],
"",
),
"^download": (
download,
[],
"change#"
),
"^file": (
file,
[
('d', 'delete', None, 'delete files from change list (but not repository)'),
],
"[-d] change# FILE ..."
),
"^gofmt": (
gofmt,
[
('l', 'list', None, 'list files that would change, but do not edit them'),
],
"FILE ..."
),
"^pending|p": (
pending,
[],
"[FILE ...]"
),
"^mail": (
mail,
review_opts + [
] + commands.walkopts,
"[-r reviewer] [--cc cc] [change# | file ...]"
),
"^submit": (
submit,
review_opts + [
('', 'no_incoming', None, 'disable initial incoming check (for testing)'),
('n', 'dryrun', None, 'make change only locally (for testing)'),
] + commands.walkopts + commands.commitopts + commands.commitopts2,
"[-r reviewer] [--cc cc] [change# | file ...]"
),
"^sync": (
sync,
[
('', 'local', None, 'do not pull changes from remote repository')
],
"[--local]",
),
"^upload": (
upload,
[],
"change#"
),
}
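# Typical workflow with the commands above (illustrative, based on their
# docstrings; the change number is a placeholder):
#   hg change              # create a CL, edit its description and file list
#   hg mail 123456         # upload the diff and ask the reviewers to look
#   hg upload 123456       # re-upload after addressing review comments
#   hg submit 123456       # commit locally and push to the remote repository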
#######################################################################
# Wrappers around upload.py for interacting with Rietveld
# HTML form parser
class FormParser(HTMLParser):
def __init__(self):
self.map = {}
self.curtag = None
self.curdata = None
HTMLParser.__init__(self)
def handle_starttag(self, tag, attrs):
if tag == "input":
key = None
value = ''
for a in attrs:
if a[0] == 'name':
key = a[1]
if a[0] == 'value':
value = a[1]
if key is not None:
self.map[key] = value
if tag == "textarea":
key = None
for a in attrs:
if a[0] == 'name':
key = a[1]
if key is not None:
self.curtag = key
self.curdata = ''
def handle_endtag(self, tag):
if tag == "textarea" and self.curtag is not None:
self.map[self.curtag] = self.curdata
self.curtag = None
self.curdata = None
def handle_charref(self, name):
self.handle_data(unichr(int(name)))
def handle_entityref(self, name):
import htmlentitydefs
if name in htmlentitydefs.entitydefs:
self.handle_data(htmlentitydefs.entitydefs[name])
else:
self.handle_data("&" + name + ";")
def handle_data(self, data):
if self.curdata is not None:
self.curdata += data.decode("utf-8").encode("utf-8")
# XML parser
def XMLGet(ui, path):
try:
data = MySend(path, force_auth=False);
except:
ui.warn("XMLGet %s: %s\n" % (path, ExceptionDetail()))
return None
return ET.XML(data)
def IsRietveldSubmitted(ui, clname, hex):
feed = XMLGet(ui, "/rss/issue/" + clname)
if feed is None:
return False
for sum in feed.findall("{http://www.w3.org/2005/Atom}entry/{http://www.w3.org/2005/Atom}summary"):
text = sum.findtext("", None).strip()
m = re.match('\*\*\* Submitted as [^*]*?([0-9a-f]+) \*\*\*', text)
if m is not None and len(m.group(1)) >= 8 and hex.startswith(m.group(1)):
return True
return False
def DownloadCL(ui, repo, clname):
cl, err = LoadCL(ui, repo, clname)
if err != "":
return None, None, "error loading CL %s: %s" % (clname, ExceptionDetail())
# Grab RSS feed to learn about CL
feed = XMLGet(ui, "/rss/issue/" + clname)
if feed is None:
return None, None, "cannot download CL"
# Find most recent diff
diff = None
prefix = 'http://' + server + '/'
for link in feed.findall("{http://www.w3.org/2005/Atom}entry/{http://www.w3.org/2005/Atom}link"):
if link.get('rel') != 'alternate':
continue
text = link.get('href')
if not text.startswith(prefix) or not text.endswith('.diff'):
continue
diff = text[len(prefix)-1:]
if diff is None:
return None, None, "CL has no diff"
diffdata = MySend(diff, force_auth=False)
# Find author - first entry will be author who created CL.
nick = None
for author in feed.findall("{http://www.w3.org/2005/Atom}entry/{http://www.w3.org/2005/Atom}author/{http://www.w3.org/2005/Atom}name"):
nick = author.findtext("", None).strip()
break
if not nick:
return None, None, "CL has no author"
# The author is just a nickname: get the real email address.
try:
# want URL-encoded nick, but without a=, and rietveld rejects + for %20.
url = "/user_popup/" + urllib.urlencode({"a": nick})[2:].replace("+", "%20")
data = MySend(url, force_auth=False)
except:
ui.warn("error looking up %s: %s\n" % (nick, ExceptionDetail()))
cl.copied_from = nick+"@needtofix"
return cl, diffdata, ""
match = re.match(r"<b>(.*) \((.*)\)</b>", data)
if not match:
return None, None, "error looking up %s: cannot parse result %s" % (nick, repr(data))
if match.group(1) != nick and match.group(2) != nick:
return None, None, "error looking up %s: got info for %s, %s" % (nick, match.group(1), match.group(2))
email = match.group(1)
# Print warning if email is not in CONTRIBUTORS file.
FindContributor(ui, repo, email)
cl.copied_from = email
return cl, diffdata, ""
def MySend(request_path, payload=None,
content_type="application/octet-stream",
timeout=None, force_auth=True,
**kwargs):
"""Run MySend1 maybe twice, because Rietveld is unreliable."""
try:
return MySend1(request_path, payload, content_type, timeout, force_auth, **kwargs)
except Exception, e:
if type(e) == urllib2.HTTPError and e.code == 403: # forbidden, it happens
raise
print >>sys.stderr, "Loading "+request_path+": "+ExceptionDetail()+"; trying again in 2 seconds."
time.sleep(2)
return MySend1(request_path, payload, content_type, timeout, force_auth, **kwargs)
# Like upload.py Send but only authenticates when the
# redirect is to www.google.com/accounts. This keeps
# unnecessary redirects from happening during testing.
def MySend1(request_path, payload=None,
content_type="application/octet-stream",
timeout=None, force_auth=True,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
global rpc
if rpc == None:
rpc = GetRpcServer(upload_options)
self = rpc
if not self.authenticated and force_auth:
self._Authenticate()
if request_path is None:
return
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "http://%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
try:
f = self.opener.open(req)
response = f.read()
f.close()
# Translate \r\n into \n, because Rietveld doesn't.
response = response.replace('\r\n', '\n')
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401:
self._Authenticate()
elif e.code == 302:
loc = e.info()["location"]
if not loc.startswith('https://www.google.com/a') or loc.find('/ServiceLogin') < 0:
return ''
self._Authenticate()
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
def GetForm(url):
f = FormParser()
f.feed(MySend(url))
f.close()
for k,v in f.map.items():
f.map[k] = v.replace("\r\n", "\n");
return f.map
# Fetch the settings for the CL, like reviewer and CC list, by
# scraping the Rietveld editing forms.
def GetSettings(issue):
# The /issue/edit page has everything but only the
# CL owner is allowed to fetch it (and submit it).
f = None
try:
f = GetForm("/" + issue + "/edit")
except:
pass
if not f or 'reviewers' not in f:
# Maybe we're not the CL owner. Fall back to the
# /publish page, which has the reviewer and CC lists,
# and then fetch the description separately.
f = GetForm("/" + issue + "/publish")
f['description'] = MySend("/"+issue+"/description", force_auth=False)
return f
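# The dict returned by GetSettings is consumed elsewhere (see LoadCL) roughly
# as follows (the issue number is a placeholder):
#   f = GetSettings("123456")
#   f['reviewers'], f['cc'], f['description']
# i.e. only those three fields are relied upon.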
def EditDesc(issue, subject=None, desc=None, reviewers=None, cc=None, closed=None):
form_fields = GetForm("/" + issue + "/edit")
if subject is not None:
form_fields['subject'] = subject
if desc is not None:
form_fields['description'] = desc
if reviewers is not None:
form_fields['reviewers'] = reviewers
if cc is not None:
form_fields['cc'] = cc
if closed is not None:
form_fields['closed'] = closed
ctype, body = EncodeMultipartFormData(form_fields.items(), [])
response = MySend("/" + issue + "/edit", body, content_type=ctype)
if response != "":
print >>sys.stderr, "Error editing description:\n" + "Sent form: \n", form_fields, "\n", response
sys.exit(2)
def PostMessage(ui, issue, message, reviewers=None, cc=None, send_mail=True, subject=None):
form_fields = GetForm("/" + issue + "/publish")
if reviewers is not None:
form_fields['reviewers'] = reviewers
if cc is not None:
form_fields['cc'] = cc
if send_mail:
form_fields['send_mail'] = "checked"
else:
del form_fields['send_mail']
if subject is not None:
form_fields['subject'] = subject
form_fields['message'] = message
form_fields['message_only'] = '1' # Don't include draft comments
if reviewers is not None or cc is not None:
form_fields['message_only'] = '' # Must set '' in order to override cc/reviewer
ctype = "application/x-www-form-urlencoded"
body = urllib.urlencode(form_fields)
response = MySend("/" + issue + "/publish", body, content_type=ctype)
if response != "":
print response
sys.exit(2)
class opt(object):
pass
def RietveldSetup(ui, repo):
global defaultcc, upload_options, rpc, server, server_url_base, force_google_account, verbosity
# Read repository-specific options from lib/codereview/codereview.cfg
try:
f = open(repo.root + '/lib/codereview/codereview.cfg')
for line in f:
if line.startswith('defaultcc: '):
defaultcc = SplitCommaSpace(line[10:])
except:
pass
# TODO(rsc): If the repository config has no codereview section,
# do not enable the extension. This allows users to
# put the extension in their global .hgrc but only
# enable it for some repositories.
# if not ui.has_section("codereview"):
# cmdtable = {}
# return
if not ui.verbose:
verbosity = 0
# Config options.
x = ui.config("codereview", "server")
if x is not None:
server = x
# TODO(rsc): Take from ui.username?
email = None
x = ui.config("codereview", "email")
if x is not None:
email = x
server_url_base = "http://" + server + "/"
testing = ui.config("codereview", "testing")
force_google_account = ui.configbool("codereview", "force_google_account", False)
upload_options = opt()
upload_options.email = email
upload_options.host = None
upload_options.verbose = 0
upload_options.description = None
upload_options.description_file = None
upload_options.reviewers = None
upload_options.cc = None
upload_options.message = None
upload_options.issue = None
upload_options.download_base = False
upload_options.revision = None
upload_options.send_mail = False
upload_options.vcs = None
upload_options.server = server
upload_options.save_cookies = True
if testing:
upload_options.save_cookies = False
upload_options.email = "test@example.com"
rpc = None
#######################################################################
# We keep a full copy of upload.py here to avoid import path hell.
# It would be nice if hg added the hg repository root
# to the default PYTHONPATH.
# Edit .+2,<hget http://codereview.appspot.com/static/upload.py
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
verbosity = 1
# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
# Constants for version control names. Used by GuessVCSName.
VCS_GIT = "Git"
VCS_MERCURIAL = "Mercurial"
VCS_SUBVERSION = "Subversion"
VCS_UNKNOWN = "Unknown"
# whitelist for non-binary filetypes which do not start with "text/"
# .mm (Objective-C) shows up as application/x-freemind on my Linux box.
TEXT_MIMETYPES = ['application/javascript', 'application/x-javascript',
'application/x-freemind']
VCS_ABBREVIATIONS = {
VCS_MERCURIAL.lower(): VCS_MERCURIAL,
"hg": VCS_MERCURIAL,
VCS_SUBVERSION.lower(): VCS_SUBVERSION,
"svn": VCS_SUBVERSION,
VCS_GIT.lower(): VCS_GIT,
}
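# Illustration (not part of the original script): the table above normalizes
# user-supplied VCS names, e.g. both VCS_ABBREVIATIONS["hg"] and
# VCS_ABBREVIATIONS["mercurial"] map to VCS_MERCURIAL ("Mercurial").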
def GetEmail(prompt):
"""Prompts the user for their email address and returns it.
The last used email address is saved to a file and offered up as a suggestion
to the user. If the user presses enter without typing in anything the last
used email address is used. If the user enters a new address, it is saved
for next time we prompt.
"""
last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
last_email = ""
if os.path.exists(last_email_file_name):
try:
last_email_file = open(last_email_file_name, "r")
last_email = last_email_file.readline().strip("\n")
last_email_file.close()
prompt += " [%s]" % last_email
except IOError, e:
pass
email = raw_input(prompt + ": ").strip()
if email:
try:
last_email_file = open(last_email_file_name, "w")
last_email_file.write(email)
last_email_file.close()
except IOError, e:
pass
else:
email = last_email
return email
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print msg
def ErrorExit(msg):
"""Print an error message to stderr and exit."""
print >>sys.stderr, msg
sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
"""Raised to indicate there was an error authenticating with ClientLogin."""
def __init__(self, url, code, msg, headers, args):
urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
self.args = args
self.reason = args["Error"]
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
def __init__(self, host, auth_function, host_override=None, extra_headers={},
save_cookies=False):
"""Creates a new HttpRpcServer.
Args:
host: The host to send requests to.
auth_function: A function that takes no arguments and returns an
(email, password) tuple when called. Will be called if authentication
is required.
host_override: The host header to send to the server (defaults to host).
extra_headers: A dict of extra headers to append to every request.
save_cookies: If True, save the authentication cookies to local disk.
If False, use an in-memory cookiejar instead. Subclasses must
implement this functionality. Defaults to False.
"""
self.host = host
self.host_override = host_override
self.auth_function = auth_function
self.authenticated = False
self.extra_headers = extra_headers
self.save_cookies = save_cookies
self.opener = self._GetOpener()
if self.host_override:
logging.info("Server: %s; Host: %s", self.host, self.host_override)
else:
logging.info("Server: %s", self.host)
def _GetOpener(self):
"""Returns an OpenerDirector for making HTTP requests.
Returns:
A urllib2.OpenerDirector object.
"""
raise NotImplementedError()
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
req = urllib2.Request(url, data=data)
if self.host_override:
req.add_header("Host", self.host_override)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = "GOOGLE"
if self.host.endswith(".google.com") and not force_google_account:
# Needed for use inside Google.
account_type = "HOSTED"
req = self._CreateRequest(
url="https://www.google.com/accounts/ClientLogin",
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "rietveld-codereview-upload",
"accountType": account_type,
}),
)
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=")
for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg,
e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
# This is a dummy value to allow us to identify when we're successful.
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
req = self._CreateRequest("http://%s/_ah/login?%s" %
(self.host, urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response (or a 302) and
directs us to authenticate ourselves with ClientLogin.
"""
for i in range(3):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
except ClientLoginError, e:
if e.reason == "BadAuthentication":
print >>sys.stderr, "Invalid username or password."
continue
if e.reason == "CaptchaRequired":
print >>sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.")
break
if e.reason == "NotVerified":
print >>sys.stderr, "Account not verified."
break
if e.reason == "TermsNotAgreed":
print >>sys.stderr, "User has not agreed to TOS."
break
if e.reason == "AccountDeleted":
print >>sys.stderr, "The user account has been deleted."
break
if e.reason == "AccountDisabled":
print >>sys.stderr, "The user account has been disabled."
break
if e.reason == "ServiceDisabled":
print >>sys.stderr, ("The user's access to the service has been "
"disabled.")
break
if e.reason == "ServiceUnavailable":
print >>sys.stderr, "The service is not available; try again later."
break
raise
self._GetAuthCookie(auth_token)
return
def Send(self, request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
if not self.authenticated:
self._Authenticate()
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "http://%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
try:
f = self.opener.open(req)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401 or e.code == 302:
self._Authenticate()
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
"""Provides a simplified RPC-style interface for HTTP requests."""
def _Authenticate(self):
"""Save the cookie jar after authentication."""
super(HttpRpcServer, self)._Authenticate()
if self.save_cookies:
StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
self.cookie_jar.save()
def _GetOpener(self):
"""Returns an OpenerDirector that supports cookies and ignores redirects.
Returns:
A urllib2.OpenerDirector object.
"""
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.ProxyHandler())
opener.add_handler(urllib2.UnknownHandler())
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
opener.add_handler(urllib2.HTTPSHandler())
opener.add_handler(urllib2.HTTPErrorProcessor())
if self.save_cookies:
self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies_" + server)
self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
if os.path.exists(self.cookie_file):
try:
self.cookie_jar.load()
self.authenticated = True
StatusUpdate("Loaded authentication cookies from %s" %
self.cookie_file)
except (cookielib.LoadError, IOError):
# Failed to load cookies - just ignore them.
pass
else:
# Create an empty cookie file with mode 600
fd = os.open(self.cookie_file, os.O_CREAT, 0600)
os.close(fd)
# Always chmod the cookie file
os.chmod(self.cookie_file, 0600)
else:
# Don't save cookies across runs of update.py.
self.cookie_jar = cookielib.CookieJar()
opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
return opener
parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
parser.add_option("-y", "--assume_yes", action="store_true",
dest="assume_yes", default=False,
help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
dest="verbose", default=1,
help="Print info level logs (default).")
group.add_option("--noisy", action="store_const", const=3,
dest="verbose", help="Print all logs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
default="codereview.appspot.com",
metavar="SERVER",
help=("The server to upload to. The format is host[:port]. "
"Defaults to '%default'."))
group.add_option("-e", "--email", action="store", dest="email",
metavar="EMAIL", default=None,
help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
metavar="HOST", default=None,
help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
dest="save_cookies", default=True,
help="Do not save authentication cookies to local disk.")
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
metavar="DESCRIPTION", default=None,
help="Optional description when creating an issue.")
group.add_option("-f", "--description_file", action="store",
dest="description_file", metavar="DESCRIPTION_FILE",
default=None,
help="Optional path of a file that contains "
"the description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
metavar="REVIEWERS", default=None,
help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
metavar="CC", default=None,
help="Add CC (comma separated email addresses).")
group.add_option("--private", action="store_true", dest="private",
default=False,
help="Make the issue restricted to reviewers and those CCed")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
metavar="MESSAGE", default=None,
help="A message to identify the patch. "
"Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
metavar="ISSUE", default=None,
help="Issue number to which to add. Defaults to new issue.")
group.add_option("--download_base", action="store_true",
dest="download_base", default=False,
help="Base files will be downloaded by the server "
"(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
metavar="REV", default=None,
help="Branch/tree/revision to diff against (used by DVCS).")
group.add_option("--send_mail", action="store_true",
dest="send_mail", default=False,
help="Send notification email to reviewers.")
group.add_option("--vcs", action="store", dest="vcs",
metavar="VCS", default=None,
help=("Version control system (optional, usually upload.py "
"already guesses the right VCS)."))
def GetRpcServer(options):
"""Returns an instance of an AbstractRpcServer.
Returns:
A new AbstractRpcServer, on which RPC calls can be made.
"""
rpc_server_class = HttpRpcServer
def GetUserCredentials():
"""Prompts the user for a username and password."""
email = options.email
if email is None:
email = GetEmail("Email (login for uploading to %s)" % options.server)
password = getpass.getpass("Password for %s: " % email)
return (email, password)
# If this is the dev_appserver, use fake authentication.
host = (options.host or options.server).lower()
if host == "localhost" or host.startswith("localhost:"):
email = options.email
if email is None:
email = "test@example.com"
logging.info("Using debug user %s. Override with --email" % email)
server = rpc_server_class(
options.server,
lambda: (email, "password"),
host_override=options.host,
extra_headers={"Cookie":
'dev_appserver_login="%s:False"' % email},
save_cookies=options.save_cookies)
# Don't try to talk to ClientLogin.
server.authenticated = True
return server
return rpc_server_class(options.server, GetUserCredentials,
host_override=options.host,
save_cookies=options.save_cookies)
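# A minimal sketch (hypothetical arguments, not part of the original script):
# typical wiring of the option parser above with GetRpcServer.  --no_cookies
# keeps the sketch free of side effects on disk; nothing calls this helper.
def _example_get_rpc_server():
  options, _ = parser.parse_args(["--server", "codereview.appspot.com",
                                  "--no_cookies", "-y"])
  return GetRpcServer(options)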
def EncodeMultipartFormData(fields, files):
"""Encode form fields for multipart/form-data.
Args:
fields: A sequence of (name, value) elements for regular form fields.
files: A sequence of (name, filename, value) elements for data to be
uploaded as files.
Returns:
(content_type, body) ready for httplib.HTTP instance.
Source:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
"""
BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
CRLF = '\r\n'
lines = []
for (key, value) in fields:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append('')
if type(value) == unicode:
value = value.encode("utf-8")
lines.append(value)
for (key, filename, value) in files:
if type(filename) == unicode:
filename = filename.encode("utf-8")
if type(value) == unicode:
value = value.encode("utf-8")
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename))
lines.append('Content-Type: %s' % GetContentType(filename))
lines.append('')
lines.append(value)
lines.append('--' + BOUNDARY + '--')
lines.append('')
body = CRLF.join(lines)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
def GetContentType(filename):
"""Helper to guess the content-type from the filename."""
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
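# A minimal sketch (hypothetical values, not part of the original script):
# shape of the data passed to EncodeMultipartFormData and how the result is
# handed to an RPC server, mirroring what UploadBaseFiles does below.  The
# field values, file content and URL are made up for illustration.
def _example_multipart_post(rpc_server):
  form_fields = [("filename", "hello.py"), ("is_binary", "False")]
  files = [("data", "hello.py", "print 'hello'\n")]
  ctype, body = EncodeMultipartFormData(form_fields, files)
  return rpc_server.Send("/123/upload_content/1/42", body, content_type=ctype)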
# Use a shell for subcommands on Windows to get a PATH search.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
universal_newlines=True,
env=os.environ):
"""Executes a command and returns the output from stdout and the return code.
Args:
command: Command to execute.
print_output: If True, the output is printed to stdout.
If False, both stdout and stderr are ignored.
universal_newlines: Use universal_newlines flag (default: True).
Returns:
Tuple (output, return code)
"""
logging.info("Running %s", command)
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=use_shell, universal_newlines=universal_newlines,
env=env)
if print_output:
output_array = []
while True:
line = p.stdout.readline()
if not line:
break
print line.strip("\n")
output_array.append(line)
output = "".join(output_array)
else:
output = p.stdout.read()
p.wait()
errout = p.stderr.read()
if print_output and errout:
print >>sys.stderr, errout
p.stdout.close()
p.stderr.close()
return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
print_output=False, env=os.environ):
data, retcode = RunShellWithReturnCode(command, print_output,
universal_newlines, env)
if retcode:
ErrorExit("Got error status from %s:\n%s" % (command, data))
if not silent_ok and not data:
ErrorExit("No output from %s" % command)
return data
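# A minimal sketch (hypothetical command, not part of the original script):
# the two shell helpers differ mainly in error handling -- RunShellWithReturnCode
# hands the exit code back to the caller, while RunShell calls ErrorExit on a
# non-zero status or (unless silent_ok) on empty output.
def _example_run_shell():
  out, code = RunShellWithReturnCode(["svn", "--version"])
  if code:
    return None
  return RunShell(["svn", "--version"], silent_ok=True)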
class VersionControlSystem(object):
"""Abstract base class providing an interface to the VCS."""
def __init__(self, options):
"""Constructor.
Args:
options: Command line options.
"""
self.options = options
def GenerateDiff(self, args):
"""Return the current diff as a string.
Args:
args: Extra arguments to pass to the diff command.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def CheckForUnknownFiles(self):
"""Show an "are you sure?" prompt if there are unknown files."""
unknown_files = self.GetUnknownFiles()
if unknown_files:
print "The following files are not added to version control:"
for line in unknown_files:
print line
prompt = "Are you sure to continue?(y/N) "
answer = raw_input(prompt).strip()
if answer != "y":
ErrorExit("User aborted")
def GetBaseFile(self, filename):
"""Get the content of the upstream version of a file.
Returns:
A tuple (base_content, new_content, is_binary, status)
base_content: The contents of the base file.
new_content: For text files, this is empty. For binary files, this is
the contents of the new file, since the diff output won't contain
information to reconstruct the current file.
is_binary: True iff the file is binary.
status: The status of the file.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = filename.strip().replace('\\', '/')
files[filename] = self.GetBaseFile(filename)
return files
def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
files):
"""Uploads the base files (and if necessary, the current ones as well)."""
def UploadFile(filename, file_id, content, is_binary, status, is_base):
"""Uploads a file to the server."""
file_too_large = False
if is_base:
type = "base"
else:
type = "current"
if len(content) > MAX_UPLOAD_SIZE:
print ("Not uploading the %s file for %s because it's too large." %
(type, filename))
file_too_large = True
content = ""
checksum = md5(content).hexdigest()
if options.verbose > 0 and not file_too_large:
print "Uploading %s file for %s" % (type, filename)
url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
form_fields = [("filename", filename),
("status", status),
("checksum", checksum),
("is_binary", str(is_binary)),
("is_current", str(not is_base)),
]
if file_too_large:
form_fields.append(("file_too_large", "1"))
if options.email:
form_fields.append(("user", options.email))
ctype, body = EncodeMultipartFormData(form_fields,
[("data", filename, content)])
response_body = rpc_server.Send(url, body,
content_type=ctype)
if not response_body.startswith("OK"):
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
patches = dict()
[patches.setdefault(v, k) for k, v in patch_list]
for filename in patches.keys():
base_content, new_content, is_binary, status = files[filename]
file_id_str = patches.get(filename)
if file_id_str.find("nobase") != -1:
base_content = None
file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
file_id = int(file_id_str)
if base_content != None:
UploadFile(filename, file_id, base_content, is_binary, status, True)
if new_content != None:
UploadFile(filename, file_id, new_content, is_binary, status, False)
def IsImage(self, filename):
"""Returns true if the filename has an image extension."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False
return mimetype.startswith("image/")
def IsBinary(self, filename):
"""Returns true if the guessed mimetyped isnt't in text group."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False # e.g. README, "real" binaries usually have an extension
# special case for text files which don't start with text/
if mimetype in TEXT_MIMETYPES:
return False
return not mimetype.startswith("text/")
class SubversionVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Subversion."""
def __init__(self, options):
super(SubversionVCS, self).__init__(options)
if self.options.revision:
match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
if not match:
ErrorExit("Invalid Subversion revision %s." % self.options.revision)
self.rev_start = match.group(1)
self.rev_end = match.group(3)
else:
self.rev_start = self.rev_end = None
# Cache output from "svn list -r REVNO dirname".
    # Keys: dirname, Values: 2-tuple (output for start rev and end rev).
self.svnls_cache = {}
# SVN base URL is required to fetch files deleted in an older revision.
# Result is cached to not guess it over and over again in GetBaseFile().
required = self.options.download_base or self.options.revision is not None
self.svn_base = self._GuessBase(required)
def GuessBase(self, required):
"""Wrapper for _GuessBase."""
return self.svn_base
def _GuessBase(self, required):
"""Returns the SVN base URL.
Args:
required: If true, exits if the url can't be guessed, otherwise None is
returned.
"""
info = RunShell(["svn", "info"])
for line in info.splitlines():
words = line.split()
if len(words) == 2 and words[0] == "URL:":
url = words[1]
scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
username, netloc = urllib.splituser(netloc)
if username:
logging.info("Removed username from base URL")
if netloc.endswith("svn.python.org"):
if netloc == "svn.python.org":
if path.startswith("/projects/"):
path = path[9:]
elif netloc != "pythondev@svn.python.org":
ErrorExit("Unrecognized Python URL: %s" % url)
base = "http://svn.python.org/view/*checkout*%s/" % path
logging.info("Guessed Python base = %s", base)
elif netloc.endswith("svn.collab.net"):
if path.startswith("/repos/"):
path = path[6:]
base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
logging.info("Guessed CollabNet base = %s", base)
elif netloc.endswith(".googlecode.com"):
path = path + "/"
base = urlparse.urlunparse(("http", netloc, path, params,
query, fragment))
logging.info("Guessed Google Code base = %s", base)
else:
path = path + "/"
base = urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
logging.info("Guessed base = %s", base)
return base
if required:
ErrorExit("Can't find URL in output from svn info")
return None
def GenerateDiff(self, args):
cmd = ["svn", "diff"]
if self.options.revision:
cmd += ["-r", self.options.revision]
cmd.extend(args)
data = RunShell(cmd)
count = 0
for line in data.splitlines():
if line.startswith("Index:") or line.startswith("Property changes on:"):
count += 1
logging.info(line)
if not count:
ErrorExit("No valid patches found in output from svn diff")
return data
def _CollapseKeywords(self, content, keyword_str):
"""Collapses SVN keywords."""
# svn cat translates keywords but svn diff doesn't. As a result of this
# behavior patching.PatchChunks() fails with a chunk mismatch error.
# This part was originally written by the Review Board development team
# who had the same problem (http://reviews.review-board.org/r/276/).
# Mapping of keywords to known aliases
svn_keywords = {
# Standard keywords
'Date': ['Date', 'LastChangedDate'],
'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
'Author': ['Author', 'LastChangedBy'],
'HeadURL': ['HeadURL', 'URL'],
'Id': ['Id'],
# Aliases
'LastChangedDate': ['LastChangedDate', 'Date'],
'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
'LastChangedBy': ['LastChangedBy', 'Author'],
'URL': ['URL', 'HeadURL'],
}
def repl(m):
if m.group(2):
return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
return "$%s$" % m.group(1)
keywords = [keyword
for name in keyword_str.split(" ")
for keyword in svn_keywords.get(name, [])]
return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
def GetUnknownFiles(self):
status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
unknown_files = []
for line in status.split("\n"):
if line and line[0] == "?":
unknown_files.append(line)
return unknown_files
def ReadFile(self, filename):
"""Returns the contents of a file."""
file = open(filename, 'rb')
result = ""
try:
result = file.read()
finally:
file.close()
return result
def GetStatus(self, filename):
"""Returns the status of a file."""
if not self.options.revision:
status = RunShell(["svn", "status", "--ignore-externals", filename])
if not status:
ErrorExit("svn status returned no output for %s" % filename)
status_lines = status.splitlines()
# If file is in a cl, the output will begin with
# "\n--- Changelist 'cl_name':\n". See
# http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
if (len(status_lines) == 3 and
not status_lines[0] and
status_lines[1].startswith("--- Changelist")):
status = status_lines[2]
else:
status = status_lines[0]
# If we have a revision to diff against we need to run "svn list"
# for the old and the new revision and compare the results to get
# the correct status for a file.
else:
dirname, relfilename = os.path.split(filename)
if dirname not in self.svnls_cache:
cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
out, returncode = RunShellWithReturnCode(cmd)
if returncode:
ErrorExit("Failed to get status for %s." % filename)
old_files = out.splitlines()
args = ["svn", "list"]
if self.rev_end:
args += ["-r", self.rev_end]
cmd = args + [dirname or "."]
out, returncode = RunShellWithReturnCode(cmd)
if returncode:
ErrorExit("Failed to run command %s" % cmd)
self.svnls_cache[dirname] = (old_files, out.splitlines())
old_files, new_files = self.svnls_cache[dirname]
if relfilename in old_files and relfilename not in new_files:
status = "D "
elif relfilename in old_files and relfilename in new_files:
status = "M "
else:
status = "A "
return status
def GetBaseFile(self, filename):
status = self.GetStatus(filename)
base_content = None
new_content = None
# If a file is copied its status will be "A +", which signifies
# "addition-with-history". See "svn st" for more information. We need to
# upload the original file or else diff parsing will fail if the file was
# edited.
if status[0] == "A" and status[3] != "+":
# We'll need to upload the new content if we're adding a binary file
# since diff's output won't contain it.
mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
silent_ok=True)
base_content = ""
is_binary = bool(mimetype) and not mimetype.startswith("text/")
if is_binary and self.IsImage(filename):
new_content = self.ReadFile(filename)
elif (status[0] in ("M", "D", "R") or
(status[0] == "A" and status[3] == "+") or # Copied file.
(status[0] == " " and status[1] == "M")): # Property change.
args = []
if self.options.revision:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
# Don't change filename, it's needed later.
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
mimetype, returncode = RunShellWithReturnCode(cmd)
if returncode:
# File does not exist in the requested revision.
# Reset mimetype, it contains an error message.
mimetype = ""
get_base = False
is_binary = bool(mimetype) and not mimetype.startswith("text/")
if status[0] == " ":
# Empty base content just to force an upload.
base_content = ""
elif is_binary:
if self.IsImage(filename):
get_base = True
if status[0] == "M":
if not self.rev_end:
new_content = self.ReadFile(filename)
else:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
new_content = RunShell(["svn", "cat", url],
universal_newlines=True, silent_ok=True)
else:
base_content = ""
else:
get_base = True
if get_base:
if is_binary:
universal_newlines = False
else:
universal_newlines = True
if self.rev_start:
# "svn cat -r REV delete_file.txt" doesn't work. cat requires
# the full URL with "@REV" appended instead of using "-r" option.
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
base_content = RunShell(["svn", "cat", url],
universal_newlines=universal_newlines,
silent_ok=True)
else:
base_content = RunShell(["svn", "cat", filename],
universal_newlines=universal_newlines,
silent_ok=True)
if not is_binary:
args = []
if self.rev_start:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:keywords", url]
keywords, returncode = RunShellWithReturnCode(cmd)
if keywords and not returncode:
base_content = self._CollapseKeywords(base_content, keywords)
else:
StatusUpdate("svn status returned unexpected output: %s" % status)
sys.exit(1)
return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Git."""
def __init__(self, options):
super(GitVCS, self).__init__(options)
# Map of filename -> (hash before, hash after) of base file.
# Hashes for "no such file" are represented as None.
self.hashes = {}
# Map of new filename -> old filename for renames.
self.renames = {}
def GenerateDiff(self, extra_args):
# This is more complicated than svn's GenerateDiff because we must convert
# the diff output to include an svn-style "Index:" line as well as record
# the hashes of the files, so we can upload them along with our diff.
    # Special hash used by git to indicate "no such content".
NULL_HASH = "0"*40
extra_args = extra_args[:]
if self.options.revision:
extra_args = [self.options.revision] + extra_args
extra_args.append('-M')
# --no-ext-diff is broken in some versions of Git, so try to work around
# this by overriding the environment (but there is still a problem if the
# git config key "diff.external" is used).
env = os.environ.copy()
if 'GIT_EXTERNAL_DIFF' in env: del env['GIT_EXTERNAL_DIFF']
gitdiff = RunShell(["git", "diff", "--no-ext-diff", "--full-index"]
+ extra_args, env=env)
svndiff = []
filecount = 0
filename = None
for line in gitdiff.splitlines():
match = re.match(r"diff --git a/(.*) b/(.*)$", line)
if match:
filecount += 1
# Intentionally use the "after" filename so we can show renames.
filename = match.group(2)
svndiff.append("Index: %s\n" % filename)
if match.group(1) != match.group(2):
self.renames[match.group(2)] = match.group(1)
else:
# The "index" line in a git diff looks like this (long hashes elided):
# index 82c0d44..b2cee3f 100755
# We want to save the left hash, as that identifies the base file.
match = re.match(r"index (\w+)\.\.(\w+)", line)
if match:
before, after = (match.group(1), match.group(2))
if before == NULL_HASH:
            before = None
          if after == NULL_HASH:
            after = None

# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
BibUpload: Receive MARC XML file and update the appropriate database
tables according to options.
"""
__revision__ = "$Id$"
import os
import re
import sys
import time
from datetime import datetime
from zlib import compress
import socket
import marshal
import copy
import tempfile
import urlparse
import urllib2
import urllib
from invenio.config import CFG_OAI_ID_FIELD, \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, \
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, \
CFG_BIBUPLOAD_EXTERNAL_OAIID_PROVENANCE_TAG, \
CFG_BIBUPLOAD_STRONG_TAGS, \
CFG_BIBUPLOAD_CONTROLLED_PROVENANCE_TAGS, \
CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE, \
CFG_BIBUPLOAD_DELETE_FORMATS, \
CFG_SITE_URL, CFG_SITE_SECURE_URL, CFG_SITE_RECORD, \
CFG_OAI_PROVENANCE_ALTERED_SUBFIELD, \
CFG_BIBUPLOAD_DISABLE_RECORD_REVISIONS, \
CFG_BIBUPLOAD_CONFLICTING_REVISION_TICKET_QUEUE
from invenio.jsonutils import json, CFG_JSON_AVAILABLE
from invenio.bibupload_config import CFG_BIBUPLOAD_CONTROLFIELD_TAGS, \
CFG_BIBUPLOAD_SPECIAL_TAGS, \
CFG_BIBUPLOAD_DELETE_CODE, \
CFG_BIBUPLOAD_DELETE_VALUE, \
CFG_BIBUPLOAD_OPT_MODES
from invenio.dbquery import run_sql, \
Error
from invenio.bibrecord import create_records, \
record_add_field, \
record_delete_field, \
record_xml_output, \
record_get_field_instances, \
record_get_field_value, \
record_get_field_values, \
field_get_subfield_values, \
field_get_subfield_instances, \
record_modify_subfield, \
record_delete_subfield_from, \
record_delete_fields, \
record_add_subfield_into, \
record_find_field, \
record_extract_oai_id, \
record_extract_dois, \
record_has_field,\
records_identical
from invenio.search_engine import get_record
from invenio.dateutils import convert_datestruct_to_datetext
from invenio.errorlib import register_exception
from invenio.bibcatalog import bibcatalog_system
from invenio.intbitset import intbitset
from invenio.urlutils import make_user_agent_string
from invenio.config import CFG_BIBDOCFILE_FILEDIR
from invenio.bibtask import task_init, write_message, \
task_set_option, task_get_option, task_get_task_param, task_update_status, \
task_update_progress, task_sleep_now_if_required, fix_argv_paths
from invenio.bibdocfile import BibRecDocs, file_strip_ext, normalize_format, \
get_docname_from_url, check_valid_url, download_url, \
KEEP_OLD_VALUE, decompose_bibdocfile_url, InvenioBibDocFileError, \
bibdocfile_url_p, CFG_BIBDOCFILE_AVAILABLE_FLAGS, guess_format_from_url, \
BibRelation, MoreInfo
from invenio.search_engine import search_pattern
from invenio.bibupload_revisionverifier import RevisionVerifier, \
InvenioBibUploadConflictingRevisionsError, \
InvenioBibUploadInvalidRevisionError, \
InvenioBibUploadMissing005Error, \
InvenioBibUploadUnchangedRecordError
#Statistic variables
stat = {}
stat['nb_records_to_upload'] = 0
stat['nb_records_updated'] = 0
stat['nb_records_inserted'] = 0
stat['nb_errors'] = 0
stat['nb_holdingpen'] = 0
stat['exectime'] = time.localtime()
_WRITING_RIGHTS = None
CFG_BIBUPLOAD_ALLOWED_SPECIAL_TREATMENTS = ('oracle', )
CFG_HAS_BIBCATALOG = "UNKNOWN"
def check_bibcatalog():
"""
Return True if bibcatalog is available.
"""
global CFG_HAS_BIBCATALOG # pylint: disable=W0603
if CFG_HAS_BIBCATALOG != "UNKNOWN":
return CFG_HAS_BIBCATALOG
CFG_HAS_BIBCATALOG = True
if bibcatalog_system is not None:
bibcatalog_response = bibcatalog_system.check_system()
else:
bibcatalog_response = "No ticket system configured"
if bibcatalog_response != "":
write_message("BibCatalog error: %s\n" % (bibcatalog_response,))
CFG_HAS_BIBCATALOG = False
return CFG_HAS_BIBCATALOG
## Let's set a reasonable timeout for URL request (e.g. FFT)
socket.setdefaulttimeout(40)
def parse_identifier(identifier):
"""Parse the identifier and determine if it is temporary or fixed"""
id_str = str(identifier)
if not id_str.startswith("TMP:"):
return (False, identifier)
else:
return (True, id_str[4:])
def resolve_identifier(tmps, identifier):
"""Resolves an identifier. If the identifier is not temporary, this
function is an identity on the second argument. Otherwise, a resolved
value is returned or an exception raised"""
is_tmp, tmp_id = parse_identifier(identifier)
if is_tmp:
if not tmp_id in tmps:
raise StandardError("Temporary identifier %s not present in the dictionary" % (tmp_id, ))
if tmps[tmp_id] == -1:
            # the identifier has been declared but never assigned a value - probably an error during processing
            raise StandardError("Temporary identifier %s has been declared, but never assigned a value. Probably an error happened during processing of the corresponding FFT. Please see the log" % (tmp_id, ))
return int(tmps[tmp_id])
else:
return int(identifier)
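# A minimal sketch (hypothetical dictionary and values, not part of the
# original module): how temporary identifiers are expected to resolve.
def _example_resolve_identifiers():
    tmps = {'doc1': 42}
    assert resolve_identifier(tmps, 'TMP:doc1') == 42  # temporary id, looked up
    assert resolve_identifier(tmps, 7) == 7            # fixed id, passed through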
_re_find_001 = re.compile('<controlfield\\s+tag=("001"|\'001\')\\s*>\\s*(\\d*)\\s*</controlfield>', re.S)
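# Illustration (not part of the original module): _re_find_001 is meant to pick
# record IDs out of raw MARCXML, e.g. '<controlfield tag="001">42</controlfield>'
# yields '42' as its second group.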
def bibupload_pending_recids():
"""This function embed a bit of A.I. and is more a hack than an elegant
algorithm. It should be updated in case bibupload/bibsched are modified
in incompatible ways.
This function return the intbitset of all the records that are being
(or are scheduled to be) touched by other bibuploads.
"""
options = run_sql("""SELECT arguments FROM schTASK WHERE status<>'DONE' AND
proc='bibupload' AND (status='RUNNING' OR status='CONTINUING' OR
status='WAITING' OR status='SCHEDULED' OR status='ABOUT TO STOP' OR
status='ABOUT TO SLEEP')""")
ret = intbitset()
xmls = []
if options:
for arguments in options:
arguments = marshal.loads(arguments[0])
for argument in arguments[1:]:
if argument.startswith('/'):
# XMLs files are recognizable because they're absolute
# files...
xmls.append(argument)
for xmlfile in xmls:
# Let's grep for the 001
try:
xml = open(xmlfile).read()
ret += [int(group[1]) for group in _re_find_001.findall(xml)]
except:
continue
return ret
### bibupload engine functions:
def bibupload(record, opt_mode=None, opt_notimechange=0, oai_rec_id="", pretend=False,
tmp_ids=None, tmp_vers=None):
"""Main function: process a record and fit it in the tables
bibfmt, bibrec, bibrec_bibxxx, bibxxx with proper record
metadata.
Return (error_code, recID) of the processed record.
"""
if tmp_ids is None:
tmp_ids = {}
if tmp_vers is None:
tmp_vers = {}
if opt_mode == 'reference':
## NOTE: reference mode has been deprecated in favour of 'correct'
opt_mode = 'correct'
assert(opt_mode in CFG_BIBUPLOAD_OPT_MODES)
error = None
affected_tags = {}
original_record = {}
rec_old = {}
now = datetime.now() # will hold record creation/modification date
record_had_altered_bit = False
is_opt_mode_delete = False
# Extraction of the Record Id from 001, SYSNO or OAIID or DOI tags:
rec_id = retrieve_rec_id(record, opt_mode, pretend=pretend)
if rec_id == -1:
msg = " Failed: either the record already exists and insert was " \
"requested or the record does not exists and " \
"replace/correct/append has been used"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, -1, msg)
elif rec_id > 0:
write_message(" -Retrieve record ID (found %s): DONE." % rec_id, verbose=2)
(unique_p, msg) = check_record_doi_is_unique(rec_id, record)
if not unique_p:
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
if not record.has_key('001'):
# Found record ID by means of SYSNO or OAIID or DOI, and the
# input MARCXML buffer does not have this 001 tag, so we
# should add it now:
error = record_add_field(record, '001', controlfield_value=rec_id)
if error is None:
msg = " Failed: Error during adding the 001 controlfield " \
"to the record"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
else:
error = None
write_message(" -Added tag 001: DONE.", verbose=2)
write_message(" -Check if the xml marc file is already in the database: DONE" , verbose=2)
record_deleted_p = False
if opt_mode == 'insert' or \
(opt_mode == 'replace_or_insert') and rec_id is None:
insert_mode_p = True
# Insert the record into the bibrec databases to have a recordId
rec_id = create_new_record(pretend=pretend)
write_message(" -Creation of a new record id (%d): DONE" % rec_id, verbose=2)
# we add the record Id control field to the record
error = record_add_field(record, '001', controlfield_value=rec_id)
if error is None:
msg = " Failed: Error during adding the 001 controlfield " \
"to the record"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
else:
error = None
if '005' not in record:
error = record_add_field(record, '005', controlfield_value=now.strftime("%Y%m%d%H%M%S.0"))
if error is None:
msg = " Failed: Error during adding to 005 controlfield to record"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
else:
error = None
else:
write_message(" Note: 005 already existing upon inserting of new record. Keeping it.", verbose=2)
elif opt_mode != 'insert':
insert_mode_p = False
# Update Mode
# Retrieve the old record to update
rec_old = get_record(rec_id)
record_had_altered_bit = record_get_field_values(rec_old, CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4], CFG_OAI_PROVENANCE_ALTERED_SUBFIELD)
# Also save a copy to restore previous situation in case of errors
original_record = get_record(rec_id)
if rec_old is None:
msg = " Failed during the creation of the old record!"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
else:
write_message(" -Retrieve the old record to update: DONE", verbose=2)
# flag to check whether the revisions have been verified and patch generated.
# If revision verification failed, then we need to manually identify the affected tags
# and process them
revision_verified = False
rev_verifier = RevisionVerifier()
#check for revision conflicts before updating record
if record_has_field(record, '005') and not CFG_BIBUPLOAD_DISABLE_RECORD_REVISIONS:
write_message(" -Upload Record has 005. Verifying Revision", verbose=2)
try:
rev_res = rev_verifier.verify_revision(record, original_record, opt_mode)
if rev_res:
opt_mode = rev_res[0]
record = rev_res[1]
affected_tags = rev_res[2]
revision_verified = True
write_message(lambda: " -Patch record generated. Changing opt_mode to correct.\nPatch:\n%s " % record_xml_output(record), verbose=2)
else:
write_message(" -No Patch Record.", verbose=2)
except InvenioBibUploadUnchangedRecordError, err:
msg = " -ISSUE: %s" % err
write_message(msg, verbose=1, stream=sys.stderr)
write_message(msg, " Continuing anyway in case there are FFT or other tags")
except InvenioBibUploadConflictingRevisionsError, err:
msg = " -ERROR: Conflicting Revisions - %s" % err
write_message(msg, verbose=1, stream=sys.stderr)
submit_ticket_for_holding_pen(rec_id, err, "Conflicting Revisions. Inserting record into holding pen.")
insert_record_into_holding_pen(record, str(rec_id))
return (2, int(rec_id), msg)
except InvenioBibUploadInvalidRevisionError, err:
msg = " -ERROR: Invalid Revision - %s" % err
write_message(msg)
submit_ticket_for_holding_pen(rec_id, err, "Invalid Revisions. Inserting record into holding pen.")
insert_record_into_holding_pen(record, str(rec_id))
return (2, int(rec_id), msg)
except InvenioBibUploadMissing005Error, err:
msg = " -ERROR: Missing 005 - %s" % err
write_message(msg)
submit_ticket_for_holding_pen(rec_id, err, "Missing 005. Inserting record into holding pen.")
insert_record_into_holding_pen(record, str(rec_id))
return (2, int(rec_id), msg)
else:
write_message(" - No 005 Tag Present. Resuming normal flow.", verbose=2)
# dictionaries to temporarily hold original recs tag-fields
existing_tags = {}
retained_tags = {}
# in case of delete operation affected tags should be deleted in delete_bibrec_bibxxx
# but should not be updated again in STAGE 4
# utilising the below flag
is_opt_mode_delete = False
if not revision_verified:
# either 005 was not present or opt_mode was not correct/replace
# in this case we still need to find out affected tags to process
write_message(" - Missing 005 or opt_mode!=Replace/Correct.Revision Verifier not called.", verbose=2)
# Identify affected tags
if opt_mode == 'correct' or opt_mode == 'replace' or opt_mode == 'replace_or_insert':
rec_diff = rev_verifier.compare_records(record, original_record, opt_mode)
affected_tags = rev_verifier.retrieve_affected_tags_with_ind(rec_diff)
elif opt_mode == 'delete':
# populate an intermediate dictionary
# used in upcoming step related to 'delete' mode
is_opt_mode_delete = True
for tag, fields in original_record.iteritems():
existing_tags[tag] = [tag + (field[1] != ' ' and field[1] or '_') + (field[2] != ' ' and field[2] or '_') for field in fields]
elif opt_mode == 'append':
for tag, fields in record.iteritems():
if tag not in CFG_BIBUPLOAD_CONTROLFIELD_TAGS:
affected_tags[tag]=[(field[1], field[2]) for field in fields]
# In Replace mode, take over old strong tags if applicable:
if opt_mode == 'replace' or \
opt_mode == 'replace_or_insert':
copy_strong_tags_from_old_record(record, rec_old)
# Delete tags to correct in the record
if opt_mode == 'correct':
delete_tags_to_correct(record, rec_old)
write_message(" -Delete the old tags to correct in the old record: DONE",
verbose=2)
# Delete tags specified if in delete mode
if opt_mode == 'delete':
record = delete_tags(record, rec_old)
for tag, fields in record.iteritems():
retained_tags[tag] = [tag + (field[1] != ' ' and field[1] or '_') + (field[2] != ' ' and field[2] or '_') for field in fields]
#identify the tags that have been deleted
for tag in existing_tags.keys():
if tag not in retained_tags:
for item in existing_tags[tag]:
tag_to_add = item[0:3]
ind1, ind2 = item[3], item[4]
if tag_to_add in affected_tags and (ind1, ind2) not in affected_tags[tag_to_add]:
affected_tags[tag_to_add].append((ind1, ind2))
else:
affected_tags[tag_to_add] = [(ind1, ind2)]
else:
deleted = list(set(existing_tags[tag]) - set(retained_tags[tag]))
for item in deleted:
tag_to_add = item[0:3]
ind1, ind2 = item[3], item[4]
if tag_to_add in affected_tags and (ind1, ind2) not in affected_tags[tag_to_add]:
affected_tags[tag_to_add].append((ind1, ind2))
else:
affected_tags[tag_to_add] = [(ind1, ind2)]
write_message(" -Delete specified tags in the old record: DONE", verbose=2)
# Append new tag to the old record and update the new record with the old_record modified
if opt_mode == 'append' or opt_mode == 'correct':
record = append_new_tag_to_old_record(record, rec_old)
write_message(" -Append new tags to the old record: DONE", verbose=2)
write_message(" -Affected Tags found after comparing upload and original records: %s"%(str(affected_tags)), verbose=2)
        # 005 tag should be added every time the record is modified
        # If an existing record is modified, its 005 tag should be overwritten with a new revision value
if record.has_key('005'):
record_delete_field(record, '005')
write_message(" Deleted the existing 005 tag.", verbose=2)
last_revision = run_sql("SELECT MAX(job_date) FROM hstRECORD WHERE id_bibrec=%s", (rec_id, ))[0][0]
if last_revision and last_revision.strftime("%Y%m%d%H%M%S.0") == now.strftime("%Y%m%d%H%M%S.0"):
            ## We are updating the same record within the same second! It's less than
## the minimal granularity. Let's pause for 1 more second to take a breath :-)
time.sleep(1)
now = datetime.now()
error = record_add_field(record, '005', controlfield_value=now.strftime("%Y%m%d%H%M%S.0"))
if error is None:
write_message(" Failed: Error during adding to 005 controlfield to record", verbose=1, stream=sys.stderr)
return (1, int(rec_id))
else:
error=None
write_message(lambda: " -Added tag 005: DONE. "+ str(record_get_field_value(record, '005', '', '')), verbose=2)
# adding 005 to affected tags will delete the existing 005 entry
# and update with the latest timestamp.
if '005' not in affected_tags:
affected_tags['005'] = [(' ', ' ')]
write_message(" -Stage COMPLETED", verbose=2)
record_deleted_p = False
try:
if not record_is_valid(record):
msg = "ERROR: record is not valid"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, -1, msg)
# Have a look if we have FFT tags
write_message("Stage 2: Start (Process FFT tags if exist).", verbose=2)
record_had_FFT = False
if extract_tag_from_record(record, 'FFT') is not None:
record_had_FFT = True
if not writing_rights_p():
write_message(" Stage 2 failed: Error no rights to write fulltext files",
verbose=1, stream=sys.stderr)
task_update_status("ERROR")
sys.exit(1)
try:
record = elaborate_fft_tags(record, rec_id, opt_mode,
pretend=pretend, tmp_ids=tmp_ids,
tmp_vers=tmp_vers)
except Exception, e:
register_exception()
msg = " Stage 2 failed: Error while elaborating FFT tags: %s" % e
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
if record is None:
msg = " Stage 2 failed: Error while elaborating FFT tags"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
write_message(" -Stage COMPLETED", verbose=2)
else:
write_message(" -Stage NOT NEEDED", verbose=2)
# Have a look if we have FFT tags
write_message("Stage 2B: Start (Synchronize 8564 tags).", verbose=2)
if record_had_FFT or extract_tag_from_record(record, '856') is not None:
try:
record = synchronize_8564(rec_id, record, record_had_FFT, pretend=pretend)
# in case if FFT is in affected list make appropriate changes
                if opt_mode != 'insert': # because for insert, all tags are affected
if ('4', ' ') not in affected_tags.get('856', []):
if '856' not in affected_tags:
affected_tags['856'] = [('4', ' ')]
elif ('4', ' ') not in affected_tags['856']:
affected_tags['856'].append(('4', ' '))
write_message(" -Modified field list updated with FFT details: %s" % str(affected_tags), verbose=2)
except Exception, e:
register_exception(alert_admin=True)
msg = " Stage 2B failed: Error while synchronizing 8564 tags: %s" % e
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
if record is None:
msg = " Stage 2B failed: Error while synchronizing 8564 tags"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
write_message(" -Stage COMPLETED", verbose=2)
else:
write_message(" -Stage NOT NEEDED", verbose=2)
write_message("Stage 3: Start (Apply fields deletion requests).", verbose=2)
write_message(lambda: " Record before deletion:\n%s" % record_xml_output(record), verbose=9)
# remove fields with __DELETE_FIELDS__
# NOTE:creating a temporary deep copy of record for iteration to avoid RunTimeError
# RuntimeError due to change in dictionary size during iteration
tmp_rec = copy.deepcopy(record)
for tag in tmp_rec:
for data_tuple in record[tag]:
if (CFG_BIBUPLOAD_DELETE_CODE, CFG_BIBUPLOAD_DELETE_VALUE) in data_tuple[0]:
# delete the tag with particular indicator pairs from original record
record_delete_field(record, tag, data_tuple[1], data_tuple[2])
write_message(lambda: " Record after cleaning up fields to be deleted:\n%s" % record_xml_output(record), verbose=9)
# Update of the BibFmt
write_message("Stage 4: Start (Update bibfmt).", verbose=2)
updates_exist = not records_identical(record, original_record)
if updates_exist:
# if record_had_altered_bit, this must be set to true, since the
# record has been altered.
if record_had_altered_bit:
oai_provenance_fields = record_get_field_instances(record, CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4])
for oai_provenance_field in oai_provenance_fields:
for i, (code, dummy_value) in enumerate(oai_provenance_field[0]):
if code == CFG_OAI_PROVENANCE_ALTERED_SUBFIELD:
oai_provenance_field[0][i] = (code, 'true')
tmp_indicators = (CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4])
if tmp_indicators not in affected_tags.get(CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3], []):
if CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3] not in affected_tags:
affected_tags[CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3]] = [tmp_indicators]
else:
affected_tags[CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3]].append(tmp_indicators)
write_message(lambda: " Updates exists:\n%s\n!=\n%s" % (record, original_record), verbose=9)
# format the single record as xml
rec_xml_new = record_xml_output(record)
# Update bibfmt with the format xm of this record
modification_date = time.strftime('%Y-%m-%d %H:%M:%S', time.strptime(record_get_field_value(record, '005'), '%Y%m%d%H%M%S.0'))
error = update_bibfmt_format(rec_id, rec_xml_new, 'xm', modification_date, pretend=pretend)
if error == 1:
msg = " Failed: error during update_bibfmt_format 'xm'"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
if CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE:
error = update_bibfmt_format(rec_id, marshal.dumps(record), 'recstruct', modification_date, pretend=pretend)
if error == 1:
msg = " Failed: error during update_bibfmt_format 'recstruct'"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
if not CFG_BIBUPLOAD_DISABLE_RECORD_REVISIONS:
# archive MARCXML format of this record for version history purposes:
error = archive_marcxml_for_history(rec_id, affected_fields=affected_tags, pretend=pretend)
if error == 1:
msg = " Failed to archive MARCXML for history"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
else:
write_message(" -Archived MARCXML for history: DONE", verbose=2)
# delete some formats like HB upon record change:
if updates_exist or record_had_FFT:
for format_to_delete in CFG_BIBUPLOAD_DELETE_FORMATS:
try:
delete_bibfmt_format(rec_id, format_to_delete, pretend=pretend)
except:
# OK, some formats like HB could not have been deleted, no big deal
pass
write_message(" -Stage COMPLETED", verbose=2)
## Let's assert that one and only one 005 tag is existing at this stage.
assert len(record['005']) == 1
# Update the database MetaData
write_message("Stage 5: Start (Update the database with the metadata).",
verbose=2)
if insert_mode_p:
update_database_with_metadata(record, rec_id, oai_rec_id, pretend=pretend)
elif opt_mode in ('replace', 'replace_or_insert',
'append', 'correct', 'delete') and updates_exist:
# now we clear all the rows from bibrec_bibxxx from the old
record_deleted_p = True
delete_bibrec_bibxxx(rec_old, rec_id, affected_tags, pretend=pretend)
# metadata update will insert tags that are available in affected_tags.
# but for delete, once the tags have been deleted from bibrec_bibxxx, they dont have to be inserted
# except for 005.
if is_opt_mode_delete:
tmp_affected_tags = copy.deepcopy(affected_tags)
for tag in tmp_affected_tags:
if tag != '005':
affected_tags.pop(tag)
write_message(" -Clean bibrec_bibxxx: DONE", verbose=2)
update_database_with_metadata(record, rec_id, oai_rec_id, affected_tags, pretend=pretend)
else:
write_message(" -Stage NOT NEEDED in mode %s" % opt_mode,
verbose=2)
write_message(" -Stage COMPLETED", verbose=2)
record_deleted_p = False
# Finally we update the bibrec table with the current date
write_message("Stage 6: Start (Update bibrec table with current date).",
verbose=2)
if opt_notimechange == 0 and (updates_exist or record_had_FFT):
bibrec_now = convert_datestruct_to_datetext(time.localtime())
write_message(" -Retrieved current localtime: DONE", verbose=2)
update_bibrec_date(bibrec_now, rec_id, insert_mode_p, pretend=pretend)
write_message(" -Stage COMPLETED", verbose=2)
else:
write_message(" -Stage NOT NEEDED", verbose=2)
# Increase statistics
if insert_mode_p:
stat['nb_records_inserted'] += 1
else:
stat['nb_records_updated'] += 1
# Upload of this record finish
write_message("Record "+str(rec_id)+" DONE", verbose=1)
return (0, int(rec_id), "")
finally:
if record_deleted_p:
            ## BibUpload has failed leaving the record deleted. We should
            ## put back the original record then.
update_database_with_metadata(original_record, rec_id, oai_rec_id, pretend=pretend)
write_message(" Restored original record", verbose=1, stream=sys.stderr)
def record_is_valid(record):
"""
Check if the record is valid. Currently this simply checks if the record
has exactly one rec_id.
@param record: the record
@type record: recstruct
@return: True if the record is valid
@rtype: bool
"""
rec_ids = record_get_field_values(record, tag="001")
if len(rec_ids) != 1:
write_message(" The record is not valid: it has not a single rec_id: %s" % (rec_ids), stream=sys.stderr)
return False
return True
def find_record_ids_by_oai_id(oaiId):
"""
    Find the record identifiers for a given OAI identifier.
    Returns an intbitset of record identifiers matching the given OAI identifier.
"""
# Is this record already in invenio (matching by oaiid)
if oaiId:
recids = search_pattern(p=oaiId, f=CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, m='e')
# Is this record already in invenio (matching by reportnumber, i.e.
# particularly 037. Idea: to avoid double insertions)
repnumber = oaiId.split(":")[-1]
if repnumber:
recids |= search_pattern(p = repnumber,
f = "reportnumber",
m = 'e' )
# Is this record already in invenio (matching by reportnumber i.e.
# particularly 037. Idea: to avoid double insertions)
repnumber = "arXiv:" + oaiId.split(":")[-1]
recids |= search_pattern(p = repnumber,
f = "reportnumber",
m = 'e' )
return recids
else:
return intbitset()
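# Illustrative sketch (not part of the original module): how an OAI
# identifier is decomposed into the report-number candidates that
# find_record_ids_by_oai_id() searches for. The sample identifier is
# hypothetical.
def _example_oai_id_report_numbers(oai_id="oai:arXiv.org:1234.5678"):
    """Return the report-number patterns derived from an OAI identifier."""
    repnumber = oai_id.split(":")[-1]        # e.g. "1234.5678"
    arxiv_repnumber = "arXiv:" + repnumber   # e.g. "arXiv:1234.5678"
    return [repnumber, arxiv_repnumber]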
def bibupload_post_phase(record, mode=None, rec_id="", pretend=False,
tmp_ids=None, tmp_vers=None):
def _elaborate_tag(record, tag, fun):
if extract_tag_from_record(record, tag) is not None:
try:
record = fun()
except Exception, e:
register_exception()
write_message(" Stage failed: Error while elaborating %s tags: %s" % (tag, e),
verbose=1, stream=sys.stderr)
return (1, int(rec_id)) # TODO: ?
if record is None:
write_message(" Stage failed: Error while elaborating %s tags" % (tag, ),
verbose=1, stream=sys.stderr)
return (1, int(rec_id))
write_message(" -Stage COMPLETED", verbose=2)
else:
write_message(" -Stage NOT NEEDED", verbose=2)
if tmp_ids is None:
tmp_ids = {}
if tmp_vers is None:
tmp_vers = {}
_elaborate_tag(record, "BDR", lambda: elaborate_brt_tags(record, rec_id = rec_id,
mode = mode,
pretend = pretend,
tmp_ids = tmp_ids,
tmp_vers = tmp_vers))
_elaborate_tag(record, "BDM", lambda: elaborate_mit_tags(record, rec_id = rec_id,
mode = mode,
pretend = pretend,
tmp_ids = tmp_ids,
tmp_vers = tmp_vers))
def submit_ticket_for_holding_pen(rec_id, err, msg):
"""
Submit a ticket via BibCatalog to report about a record that has been put
into the Holding Pen.
@param rec_id: the affected record
@param err: the corresponding Exception
@param msg: verbose message
"""
from invenio import bibtask
from invenio.webuser import get_email_from_username, get_uid_from_email
user = task_get_task_param("user")
uid = None
if user:
try:
uid = get_uid_from_email(get_email_from_username(user))
except Exception, err:
write_message("WARNING: can't reliably retrieve uid for user %s: %s" % (user, err), stream=sys.stderr)
if check_bibcatalog():
text = """
%(msg)s found for record %(rec_id)s: %(err)s
See: <%(siteurl)s/record/edit/#state=edit&recid=%(rec_id)s>
BibUpload task information:
task_id: %(task_id)s
task_specific_name: %(task_specific_name)s
user: %(user)s
task_params: %(task_params)s
task_options: %(task_options)s""" % {
"msg": msg,
"rec_id": rec_id,
"err": err,
"siteurl": CFG_SITE_SECURE_URL,
"task_id": task_get_task_param("task_id"),
"task_specific_name": task_get_task_param("task_specific_name"),
"user": user,
"task_params": bibtask._TASK_PARAMS,
"task_options": bibtask._OPTIONS}
bibcatalog_system.ticket_submit(subject="%s: %s by %s" % (msg, rec_id, user), recordid=rec_id, text=text, queue=CFG_BIBUPLOAD_CONFLICTING_REVISION_TICKET_QUEUE, owner=uid)
def insert_record_into_holding_pen(record, oai_id, pretend=False):
query = "INSERT INTO bibHOLDINGPEN (oai_id, changeset_date, changeset_xml, id_bibrec) VALUES (%s, NOW(), %s, %s)"
xml_record = record_xml_output(record)
bibrec_ids = find_record_ids_by_oai_id(oai_id) # here determining the identifier of the record
if len(bibrec_ids) > 0:
bibrec_id = bibrec_ids.pop()
else:
# id not found by using the oai_id, let's use a wider search based
# on any information we might have.
bibrec_id = retrieve_rec_id(record, 'holdingpen', pretend=pretend)
if bibrec_id is None:
bibrec_id = 0
if not pretend:
run_sql(query, (oai_id, xml_record, bibrec_id))
# record_id is logged as 0! ( We are not inserting into the main database)
log_record_uploading(oai_id, task_get_task_param('task_id', 0), 0, 'H', pretend=pretend)
stat['nb_holdingpen'] += 1
def print_out_bibupload_statistics():
"""Print the statistics of the process"""
out = "Task stats: %(nb_input)d input records, %(nb_updated)d updated, " \
"%(nb_inserted)d inserted, %(nb_errors)d errors, %(nb_holdingpen)d inserted to holding pen. " \
"Time %(nb_sec).2f sec." % { \
'nb_input': stat['nb_records_to_upload'],
'nb_updated': stat['nb_records_updated'],
'nb_inserted': stat['nb_records_inserted'],
'nb_errors': stat['nb_errors'],
'nb_holdingpen': stat['nb_holdingpen'],
'nb_sec': time.time() - time.mktime(stat['exectime']) }
write_message(out)
def open_marc_file(path):
"""Open a file and return the data"""
try:
# open the file containing the marc document
marc_file = open(path, 'r')
marc = marc_file.read()
marc_file.close()
except IOError, erro:
write_message("Error: %s" % erro, verbose=1, stream=sys.stderr)
write_message("Exiting.", sys.stderr)
if erro.errno == 2:
# No such file or directory
# Not scary
task_update_status("CERROR")
else:
task_update_status("ERROR")
sys.exit(1)
return marc
def xml_marc_to_records(xml_marc):
"""create the records"""
# Creation of the records from the xml Marc in argument
recs = create_records(xml_marc, 1, 1)
if recs == []:
write_message("Error: Cannot parse MARCXML file.", verbose=1, stream=sys.stderr)
write_message("Exiting.", sys.stderr)
task_update_status("ERROR")
sys.exit(1)
elif recs[0][0] is None:
write_message("Error: MARCXML file has wrong format: %s" % recs,
verbose=1, stream=sys.stderr)
write_message("Exiting.", sys.stderr)
task_update_status("CERROR")
sys.exit(1)
else:
recs = map((lambda x:x[0]), recs)
return recs
def find_record_format(rec_id, bibformat):
"""Look whether record REC_ID is formatted in FORMAT,
i.e. whether FORMAT exists in the bibfmt table for this record.
Return the number of times it is formatted: 0 if not, 1 if yes,
2 if found more than once (should never occur).
"""
out = 0
query = """SELECT COUNT(*) FROM bibfmt WHERE id_bibrec=%s AND format=%s"""
params = (rec_id, bibformat)
res = []
res = run_sql(query, params)
out = res[0][0]
return out
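# Usage sketch (hypothetical record id and format): find_record_format()
# can be used to decide whether a cached output format needs to be
# (re)generated for a record.
#     if find_record_format(10, "HB") == 0:
#         pass  # no cached HB format for record 10, regenerate it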
def find_record_from_recid(rec_id):
"""
Try to find record in the database from the REC_ID number.
Return record ID if found, None otherwise.
"""
res = run_sql("SELECT id FROM bibrec WHERE id=%s",
(rec_id,))
if res:
return res[0][0]
else:
return None
def find_record_from_sysno(sysno):
"""
Try to find record in the database from the external SYSNO number.
Return record ID if found, None otherwise.
"""
bibxxx = 'bib'+CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[0:2]+'x'
bibrec_bibxxx = 'bibrec_' + bibxxx
res = run_sql("""SELECT bb.id_bibrec FROM %(bibrec_bibxxx)s AS bb,
%(bibxxx)s AS b WHERE b.tag=%%s AND b.value=%%s
AND bb.id_bibxxx=b.id""" % \
{'bibxxx': bibxxx,
'bibrec_bibxxx': bibrec_bibxxx},
(CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, sysno,))
if res:
return res[0][0]
else:
return None
def find_records_from_extoaiid(extoaiid, extoaisrc=None):
"""
Try to find records in the database from the external EXTOAIID number.
Return list of record ID if found, None otherwise.
"""
assert(CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:5] == CFG_BIBUPLOAD_EXTERNAL_OAIID_PROVENANCE_TAG[:5])
bibxxx = 'bib'+CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[0:2]+'x'
bibrec_bibxxx = 'bibrec_' + bibxxx
write_message(' Looking for extoaiid="%s" with extoaisrc="%s"' % (extoaiid, extoaisrc), verbose=9)
id_bibrecs = intbitset(run_sql("""SELECT bb.id_bibrec FROM %(bibrec_bibxxx)s AS bb,
%(bibxxx)s AS b WHERE b.tag=%%s AND b.value=%%s
AND bb.id_bibxxx=b.id""" % \
{'bibxxx': bibxxx,
'bibrec_bibxxx': bibrec_bibxxx},
(CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, extoaiid,)))
write_message(' Partially found %s for extoaiid="%s"' % (id_bibrecs, extoaiid), verbose=9)
ret = intbitset()
for id_bibrec in id_bibrecs:
record = get_record(id_bibrec)
instances = record_get_field_instances(record, CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[0:3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4])
write_message(' recid %s -> instances "%s"' % (id_bibrec, instances), verbose=9)
for instance in instances:
this_extoaisrc = field_get_subfield_values(instance, CFG_BIBUPLOAD_EXTERNAL_OAIID_PROVENANCE_TAG[5])
this_extoaisrc = this_extoaisrc and this_extoaisrc[0] or None
this_extoaiid = field_get_subfield_values(instance, CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[5])
this_extoaiid = this_extoaiid and this_extoaiid[0] or None
write_message(" this_extoaisrc -> %s, this_extoaiid -> %s" % (this_extoaisrc, this_extoaiid), verbose=9)
if this_extoaiid == extoaiid:
write_message(' recid %s -> provenance "%s"' % (id_bibrec, this_extoaisrc), verbose=9)
if this_extoaisrc == extoaisrc:
write_message('Found recid %s for extoaiid="%s" with provenance="%s"' % (id_bibrec, extoaiid, extoaisrc), verbose=9)
ret.add(id_bibrec)
break
if this_extoaisrc is None:
write_message('WARNING: Found recid %s for extoaiid="%s" that doesn\'t specify any provenance, while input record does.' % (id_bibrec, extoaiid), stream=sys.stderr)
if extoaisrc is None:
write_message('WARNING: Found recid %s for extoaiid="%s" that specify a provenance (%s), while input record does not have a provenance.' % (id_bibrec, extoaiid, this_extoaisrc), stream=sys.stderr)
return ret
def find_record_from_oaiid(oaiid):
"""
Try to find record in the database from the OAI ID number and OAI SRC.
Return record ID if found, None otherwise.
"""
bibxxx = 'bib'+CFG_OAI_ID_FIELD[0:2]+'x'
bibrec_bibxxx = 'bibrec_' + bibxxx
res = run_sql("""SELECT bb.id_bibrec FROM %(bibrec_bibxxx)s AS bb,
%(bibxxx)s AS b WHERE b.tag=%%s AND b.value=%%s
AND bb.id_bibxxx=b.id""" % \
{'bibxxx': bibxxx,
'bibrec_bibxxx': bibrec_bibxxx},
(CFG_OAI_ID_FIELD, oaiid,))
if res:
return res[0][0]
else:
return None
def find_record_from_doi(doi):
"""
Try to find record in the database from the given DOI.
Return record ID if found, None otherwise.
"""
bibxxx = 'bib02x'
bibrec_bibxxx = 'bibrec_' + bibxxx
res = run_sql("""SELECT bb.id_bibrec, bb.field_number
FROM %(bibrec_bibxxx)s AS bb, %(bibxxx)s AS b
WHERE b.tag=%%s AND b.value=%%s
AND bb.id_bibxxx=b.id""" % \
{'bibxxx': bibxxx,
'bibrec_bibxxx': bibrec_bibxxx},
('0247_a', doi,))
# For each of the result, make sure that it is really tagged as doi
for (id_bibrec, field_number) in res:
res = run_sql("""SELECT bb.id_bibrec
FROM %(bibrec_bibxxx)s AS bb, %(bibxxx)s AS b
WHERE b.tag=%%s AND b.value=%%s
AND bb.id_bibxxx=b.id and bb.field_number=%%s and bb.id_bibrec=%%s""" % \
{'bibxxx': bibxxx,
'bibrec_bibxxx': bibrec_bibxxx},
('0247_2', "doi", field_number, id_bibrec))
if res and res[0][0] == id_bibrec:
return res[0][0]
return None
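# Illustrative MARC sketch (hypothetical DOI value) of the 0247_ field
# layout that find_record_from_doi() relies on: the DOI sits in $a and is
# only accepted when a $2 subfield of the same field instance says "doi".
#     <datafield tag="024" ind1="7" ind2=" ">
#       <subfield code="a">10.1234/example.5678</subfield>
#       <subfield code="2">doi</subfield>
#     </datafield>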
def extract_tag_from_record(record, tag_number):
""" Extract the tag_number for record."""
# first step verify if the record is not already in the database
if record:
return record.get(tag_number, None)
return None
def retrieve_rec_id(record, opt_mode, pretend=False, post_phase = False):
"""Retrieve the record Id from a record by using tag 001 or SYSNO or OAI ID or DOI
tag. opt_mode is the desired mode.
@param post_phase: tells if we are calling this method in the post-processing phase. If True, we accept the presence of 001 fields even in insert mode.
@type post_phase: boolean
"""
rec_id = None
# 1st step: we look for the tag 001
tag_001 = extract_tag_from_record(record, '001')
if tag_001 is not None:
# We extract the record ID from the tag
rec_id = tag_001[0][3]
# if we are in insert mode => error
if opt_mode == 'insert' and not post_phase:
write_message(" Failed: tag 001 found in the xml" \
" submitted, you should use the option replace," \
" correct or append to replace an existing" \
" record. (-h for help)",
verbose=1, stream=sys.stderr)
return -1
else:
# we found the rec id and we are not in insert mode => continue
# we try to match rec_id against the database:
if find_record_from_recid(rec_id) is not None:
# okay, 001 corresponds to some known record
return int(rec_id)
elif opt_mode in ('replace', 'replace_or_insert'):
if task_get_option('force'):
# we found the rec_id but it's not in the system and we are
# requested to replace records. Therefore we create on the fly
# an empty record allocating the recid.
write_message(" Warning: tag 001 found in the xml with"
" value %(rec_id)s, but rec_id %(rec_id)s does"
" not exist. Since the mode replace was"
" requested the rec_id %(rec_id)s is allocated"
" on-the-fly." % {"rec_id": rec_id},
stream=sys.stderr)
return create_new_record(rec_id=rec_id, pretend=pretend)
else:
# Since --force was not used we are going to raise an error
write_message(" Failed: tag 001 found in the xml"
" submitted with value %(rec_id)s. The"
" corresponding record however does not"
" exists. If you want to really create"
" such record, please use the --force"
" parameter when calling bibupload." % {
"rec_id": rec_id}, stream=sys.stderr)
return -1
else:
# The record doesn't exist yet. We shall try to check
# the SYSNO or OAI or DOI id later.
write_message(" -Tag 001 value not found in database.",
verbose=9)
rec_id = None
else:
write_message(" -Tag 001 not found in the xml marc file.", verbose=9)
if rec_id is None:
# 2nd step we look for the SYSNO
sysnos = record_get_field_values(record,
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[0:3],
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[3:4] != "_" and \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[3:4] or "",
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[4:5] != "_" and \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[4:5] or "",
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[5:6])
if sysnos:
sysno = sysnos[0] # there should be only one external SYSNO
write_message(" -Checking if SYSNO " + sysno + \
" exists in the database", verbose=9)
# try to find the corresponding rec id from the database
rec_id = find_record_from_sysno(sysno)
if rec_id is not None:
# rec_id found
pass
else:
# The record doesn't exist yet. We will try to check
# external and internal OAI ids later.
write_message(" -Tag SYSNO value not found in database.",
verbose=9)
rec_id = None
else:
write_message(" -Tag SYSNO not found in the xml marc file.",
verbose=9)
if rec_id is None:
# 3rd step we look for the external OAIID
extoai_fields = record_get_field_instances(record,
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[0:3],
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3:4] != "_" and \
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3:4] or "",
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4:5] != "_" and \
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4:5] or "")
if extoai_fields:
for field in extoai_fields:
extoaiid = field_get_subfield_values(field, CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[5:6])
extoaisrc = field_get_subfield_values(field, CFG_BIBUPLOAD_EXTERNAL_OAIID_PROVENANCE_TAG[5:6])
if extoaiid:
extoaiid = extoaiid[0]
if extoaisrc:
extoaisrc = extoaisrc[0]
else:
extoaisrc = None
write_message(" -Checking if EXTOAIID %s (%s) exists in the database" % (extoaiid, extoaisrc), verbose=9)
# try to find the corresponding rec id from the database
rec_ids = find_records_from_extoaiid(extoaiid, extoaisrc)
if rec_ids:
# rec_id found
rec_id = rec_ids.pop()
break
else:
# The record doesn't exist yet. We will try to check
# OAI id later.
write_message(" -Tag EXTOAIID value not found in database.",
verbose=9)
rec_id = None
else:
write_message(" -Tag EXTOAIID not found in the xml marc file.", verbose=9)
if rec_id is None:
# 4th step we look for the OAI ID
oaiidvalues = record_get_field_values(record,
CFG_OAI_ID_FIELD[0:3],
CFG_OAI_ID_FIELD[3:4] != "_" and \
CFG_OAI_ID_FIELD[3:4] or "",
CFG_OAI_ID_FIELD[4:5] != "_" and \
CFG_OAI_ID_FIELD[4:5] or "",
CFG_OAI_ID_FIELD[5:6])
if oaiidvalues:
oaiid = oaiidvalues[0] # there should be only one OAI ID
write_message(" -Check if local OAI ID " + oaiid + \
" exist in the database", verbose=9)
# try to find the corresponding rec id from the database
rec_id = find_record_from_oaiid(oaiid)
if rec_id is not None:
# rec_id found
pass
else:
write_message(" -Tag OAI ID value not found in database.",
verbose=9)
rec_id = None
else:
write_message(" -Tag SYSNO not found in the xml marc file.",
verbose=9)
if rec_id is None:
# 5th step we look for the DOI.
record_dois = record_extract_dois(record)
matching_recids = set()
if record_dois:
# try to find the corresponding rec id from the database
for record_doi in record_dois:
possible_recid = find_record_from_doi(record_doi)
if possible_recid:
matching_recids.add(possible_recid)
if len(matching_recids) > 1:
# Oops, this record refers to a DOI existing in multiple records.
# We cannot tell which one to choose.
write_message(" Failed: Multiple records found in the" \
" database %s that match the DOI(s) in the input" \
" MARCXML %s" % (repr(matching_recids), repr(record_dois)),
verbose=1, stream=sys.stderr)
return -1
elif len(matching_recids) == 1:
rec_id = matching_recids.pop()
if opt_mode == 'insert':
write_message(" Failed: DOI tag matching record #%s found in the xml" \
" submitted, you should use the option replace," \
" correct or append to replace an existing" \
" record. (-h for help)" % rec_id,
verbose=1, stream=sys.stderr)
return -1
else:
write_message(" - Tag DOI value not found in database.",
verbose=9)
rec_id = None
else:
write_message(" -Tag DOI not found in the xml marc file.",
verbose=9)
# Now we should have detected rec_id from the 001, SYSNO, OAIID
# or DOI tags. (None otherwise.)
if rec_id:
if opt_mode == 'insert':
write_message(" Failed: Record found in the database," \
" you should use the option replace," \
" correct or append to replace an existing" \
" record. (-h for help)",
verbose=1, stream=sys.stderr)
return -1
else:
if opt_mode != 'insert' and \
opt_mode != 'replace_or_insert':
write_message(" Failed: Record not found in the database."\
" Please insert the file before updating it."\
" (-h for help)", verbose=1, stream=sys.stderr)
return -1
return rec_id and int(rec_id) or None
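# Summary sketch (derived from the code above) of the identifier lookup
# order used by retrieve_rec_id(); each step runs only if the previous one
# did not yield a rec_id:
#     1. tag 001 (direct record id)
#     2. CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG (external SYSNO)
#     3. CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG (external OAI id, with provenance)
#     4. CFG_OAI_ID_FIELD (local OAI id)
#     5. 0247_a subfields whose sibling $2 equals "doi" (DOI)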
def check_record_doi_is_unique(rec_id, record):
"""
Check that DOI found in 'record' does not exist in any other
record than 'recid'.
Return (boolean, msg) where 'boolean' would be True if the DOI is
unique.
"""
record_dois = record_extract_dois(record)
if record_dois:
matching_recids = set()
for record_doi in record_dois:
possible_recid = find_record_from_doi(record_doi)
if possible_recid:
matching_recids.add(possible_recid)
if len(matching_recids) > 1:
# Oops, this record refers to DOI existing in multiple records.
msg = " Failed: Multiple records found in the" \
" database %s that match the DOI(s) in the input" \
" MARCXML %s" % (repr(matching_recids), repr(record_dois))
return (False, msg)
elif len(matching_recids) == 1:
matching_recid = matching_recids.pop()
if str(matching_recid) != str(rec_id):
# Oops, this record refers to DOI existing in a different record.
msg = " Failed: DOI(s) %s found in this record (#%s)" \
" already exist(s) in another other record (#%s)" % \
(repr(record_dois), rec_id, matching_recid)
return (False, msg)
return (True, "")
### Insert functions
def create_new_record(rec_id=None, pretend=False):
"""
Create new record in the database
@param rec_id: if specified the new record will have this rec_id.
@type rec_id: int
@return: the allocated rec_id
@rtype: int
@note: in case of errors will be returned None
"""
if rec_id is not None:
try:
rec_id = int(rec_id)
except (ValueError, TypeError), error:
write_message(" Error during the creation_new_record function: %s "
% error, verbose=1, stream=sys.stderr)
return None
if run_sql("SELECT id FROM bibrec WHERE id=%s", (rec_id, )):
write_message(" Error during the creation_new_record function: the requested rec_id %s already exists." % rec_id)
return None
if pretend:
if rec_id:
return rec_id
else:
return run_sql("SELECT max(id)+1 FROM bibrec")[0][0]
if rec_id is not None:
return run_sql("INSERT INTO bibrec (id, creation_date, modification_date) VALUES (%s, NOW(), NOW())", (rec_id, ))
else:
return run_sql("INSERT INTO bibrec (creation_date, modification_date) VALUES (NOW(), NOW())")
def insert_bibfmt(id_bibrec, marc, bibformat, modification_date='1970-01-01 00:00:00', pretend=False):
"""Insert the format in the table bibfmt"""
# compress the marc value
pickled_marc = compress(marc)
try:
time.strptime(modification_date, "%Y-%m-%d %H:%M:%S")
except ValueError:
modification_date = '1970-01-01 00:00:00'
query = """INSERT LOW_PRIORITY INTO bibfmt (id_bibrec, format, last_updated, value)
VALUES (%s, %s, %s, %s)"""
if not pretend:
row_id = run_sql(query, (id_bibrec, bibformat, modification_date, pickled_marc))
return row_id
else:
return 1
def insert_record_bibxxx(tag, value, pretend=False):
"""Insert the record into bibxxx"""
# determine into which table one should insert the record
table_name = 'bib'+tag[0:2]+'x'
# check if the tag, value combination exists in the table
query = """SELECT id,value FROM %s """ % table_name
query += """ WHERE tag=%s AND value=%s"""
params = (tag, value)
res = None
res = run_sql(query, params)
# Note: compare now the found values one by one and look for
# string binary equality (e.g. to respect lowercase/uppercase
# match), regardless of the charset etc settings. Ideally we
# could use a BINARY operator in the above SELECT statement, but
# we would have to check compatibility on various MySQLdb versions
# etc; this approach checks all matched values in Python, not in
# MySQL, which is less cool, but more conservative, so it should
# work better on most setups.
if res:
for row in res:
row_id = row[0]
row_value = row[1]
if row_value == value:
return (table_name, row_id)
# We got here only when the tag, value combination was not found,
# so it is now necessary to insert the tag, value combination into
# bibxxx table as new.
query = """INSERT INTO %s """ % table_name
query += """ (tag, value) values (%s , %s)"""
params = (tag, value)
if not pretend:
row_id = run_sql(query, params)
else:
return (table_name, 1)
return (table_name, row_id)
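# Illustrative sketch of why the Python-side re-check above matters: with a
# case-insensitive MySQL collation, the SELECT may match rows that differ
# only in case, so we keep only binary-equal values. Values are
# hypothetical.
#     SQL:    'Ellis' = 'ellis'   -- may be true under a *_ci collation
#     Python: 'Ellis' == 'ellis'  # False, so a new row would be inserted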
def insert_record_bibrec_bibxxx(table_name, id_bibxxx,
field_number, id_bibrec, pretend=False):
"""Insert the record into bibrec_bibxxx"""
# determine into which table one should insert the record
full_table_name = 'bibrec_'+ table_name
# insert the proper row into the table
query = """INSERT INTO %s """ % full_table_name
query += """(id_bibrec,id_bibxxx, field_number) values (%s , %s, %s)"""
params = (id_bibrec, id_bibxxx, field_number)
if not pretend:
res = run_sql(query, params)
else:
return 1
return res
def synchronize_8564(rec_id, record, record_had_FFT, pretend=False):
"""
Synchronize 8564_ tags and BibDocFile tables.
This function directly manipulates the record parameter.
@type rec_id: positive integer
@param rec_id: the record identifier.
@param record: the record structure as created by bibrecord.create_record
@type record_had_FFT: boolean
@param record_had_FFT: True if the incoming bibuploaded-record used FFT
@return: the manipulated record (which is also modified as a side effect)
"""
def merge_marc_into_bibdocfile(field, pretend=False):
"""
Internal function that reads a single field and stores its content
in BibDocFile tables.
@param field: the 8564_ field containing a BibDocFile URL.
"""
write_message('Merging field: %s' % (field, ), verbose=9)
url = field_get_subfield_values(field, 'u')[:1] or field_get_subfield_values(field, 'q')[:1]
description = field_get_subfield_values(field, 'y')[:1]
comment = field_get_subfield_values(field, 'z')[:1]
if url:
recid, docname, docformat = decompose_bibdocfile_url(url[0])
if recid != rec_id:
write_message("INFO: URL %s is not pointing to a fulltext owned by this record (%s)" % (url, recid), stream=sys.stderr)
else:
try:
bibdoc = BibRecDocs(recid).get_bibdoc(docname)
if description and not pretend:
bibdoc.set_description(description[0], docformat)
if comment and not pretend:
bibdoc.set_comment(comment[0], docformat)
except InvenioBibDocFileError:
## Apparently the referenced docname doesn't exist anymore.
## Too bad. Let's skip it.
write_message("WARNING: docname %s does not seem to exist for record %s. Has it been renamed outside FFT?" % (docname, recid), stream=sys.stderr)
def merge_bibdocfile_into_marc(field, subfields):
"""
Internal function that reads BibDocFile table entries referenced by
the URL in the given 8564_ field and integrate the given information
directly with the provided subfields.
@param field: the 8564_ field containing a BibDocFile URL.
@param subfields: the subfields corresponding to the BibDocFile URL
generated after BibDocFile tables.
"""
write_message('Merging subfields %s into field %s' % (subfields, field), verbose=9)
subfields = dict(subfields) ## We make a copy not to have side-effects
subfield_to_delete = []
for subfield_position, (code, value) in enumerate(field_get_subfield_instances(field)):
## For each subfield instance already existing...
if code in subfields:
## ...We substitute it with what is in BibDocFile tables
record_modify_subfield(record, '856', code, subfields[code],
subfield_position, field_position_global=field[4])
del subfields[code]
else:
## ...We delete it otherwise
subfield_to_delete.append(subfield_position)
subfield_to_delete.sort()
for counter, position in enumerate(subfield_to_delete):
## FIXME: Very hackish algorithm. Since deleting a subfield
## will alter the position of the following subfields, we
## take note of this and adjust further positions
## by using a counter.
record_delete_subfield_from(record, '856', position - counter,
field_position_global=field[4])
subfields = subfields.items()
subfields.sort()
for code, value in subfields:
## Let's add non-previously existing subfields
record_add_subfield_into(record, '856', code, value,
field_position_global=field[4])
def get_bibdocfile_managed_info():
"""
Internal function, returns a dictionary of
BibDocFile URL -> wanna-be subfields.
This information is retrieved from internal BibDoc
structures rather than from input MARC XML files
@rtype: mapping
@return: BibDocFile URL -> wanna-be subfields dictionary
"""
ret = {}
bibrecdocs = BibRecDocs(rec_id)
latest_files = bibrecdocs.list_latest_files(list_hidden=False)
for afile in latest_files:
url = afile.get_url()
ret[url] = {'u': url}
description = afile.get_description()
comment = afile.get_comment()
subformat = afile.get_subformat()
if description:
ret[url]['y'] = description
if comment:
ret[url]['z'] = comment
if subformat:
ret[url]['x'] = subformat
return ret
write_message("Synchronizing MARC of recid '%s' with:\n%s" % (rec_id, record), verbose=9)
tags856s = record_get_field_instances(record, '856', '%', '%')
write_message("Original 856%% instances: %s" % tags856s, verbose=9)
tags8564s_to_add = get_bibdocfile_managed_info()
write_message("BibDocFile instances: %s" % tags8564s_to_add, verbose=9)
positions_tags8564s_to_remove = []
for local_position, field in enumerate(tags856s):
if field[1] == '4' and field[2] == ' ':
write_message('Analysing %s' % (field, ), verbose=9)
for url in field_get_subfield_values(field, 'u') + field_get_subfield_values(field, 'q'):
if url in tags8564s_to_add:
# there exists a link in the MARC of the record and the connection exists in BibDoc tables
if record_had_FFT:
merge_bibdocfile_into_marc(field, tags8564s_to_add[url])
else:
merge_marc_into_bibdocfile(field, pretend=pretend)
del tags8564s_to_add[url]
break
elif bibdocfile_url_p(url) and decompose_bibdocfile_url(url)[0] == rec_id:
# The link exists and is a potentially correct-looking link to a document;
# moreover, it refers to the current record id ... but it does not exist in
# internal BibDoc structures. This could have happened in the case of renaming a document
# or removing it. In both cases we have to remove the link... a new one will be created
positions_tags8564s_to_remove.append(local_position)
write_message("%s to be deleted and re-synchronized" % (field, ), verbose=9)
break
record_delete_fields(record, '856', positions_tags8564s_to_remove)
tags8564s_to_add = tags8564s_to_add.values()
tags8564s_to_add.sort()
for subfields in tags8564s_to_add:
subfields = subfields.items()
subfields.sort()
record_add_field(record, '856', '4', ' ', subfields=subfields)
write_message('Final record: %s' % record, verbose=9)
return record
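# Usage sketch (hypothetical arguments): after FFT processing, re-align the
# 8564_ fields of the record with what the BibDocFile tables now contain.
#     record = synchronize_8564(rec_id, record, record_had_FFT, pretend=pretend)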
def _get_subfield_value(field, subfield_code, default=None):
res = field_get_subfield_values(field, subfield_code)
if res != [] and res is not None:
return res[0]
else:
return default
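# Illustrative sketch (hypothetical field instances): _get_subfield_value()
# returns the first value of the requested subfield, or the default when the
# subfield is missing.
#     relation_id = _get_subfield_value(brt, "r")           # e.g. "12" or None
#     doctype     = _get_subfield_value(fft, "t", "Main")   # default applies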
def elaborate_mit_tags(record, rec_id, mode, pretend = False, tmp_ids = {},
tmp_vers = {}):
"""
Uploading MoreInfo -> BDM tags
"""
tuple_list = extract_tag_from_record(record, 'BDM')
# Now gathering information from BDR tags - to be processed later
write_message("Processing BDM entries of the record ")
recordDocs = BibRecDocs(rec_id)
if tuple_list:
for mit in record_get_field_instances(record, 'BDM', ' ', ' '):
relation_id = _get_subfield_value(mit, "r")
bibdoc_id = _get_subfield_value(mit, "i")
# checking for a possibly temporary ID
if not (bibdoc_id is None):
bibdoc_id = resolve_identifier(tmp_ids, bibdoc_id)
bibdoc_ver = _get_subfield_value(mit, "v")
if not (bibdoc_ver is None):
bibdoc_ver = resolve_identifier(tmp_vers, bibdoc_ver)
bibdoc_name = _get_subfield_value(mit, "n")
bibdoc_fmt = _get_subfield_value(mit, "f")
moreinfo_str = _get_subfield_value(mit, "m")
if bibdoc_id == None:
if bibdoc_name == None:
raise StandardError("Incorrect relation. Neither name nor identifier of the first obejct has been specified")
else:
# retrieving the ID based on the document name (inside current record)
# The document is attached to current record.
try:
bibdoc_id = recordDocs.get_docid(bibdoc_name)
except:
raise StandardError("BibDoc of a name %s does not exist within a record" % (bibdoc_name, ))
else:
if bibdoc_name != None:
write_message("Warning: both name and id of the first document of a relation have been specified. Ignoring the name")
if (moreinfo_str is None or mode in ("replace", "correct")) and (not pretend):
MoreInfo(docid=bibdoc_id , version = bibdoc_ver,
docformat = bibdoc_fmt, relation = relation_id).delete()
if (not moreinfo_str is None) and (not pretend):
MoreInfo.create_from_serialised(moreinfo_str,
docid=bibdoc_id,
version = bibdoc_ver,
docformat = bibdoc_fmt,
relation = relation_id)
return record
def elaborate_brt_tags(record, rec_id, mode, pretend=False, tmp_ids = {}, tmp_vers = {}):
"""
Process BDR tags describing relations between existing objects
"""
tuple_list = extract_tag_from_record(record, 'BDR')
# Now gathering information from BDR tags - to be processed later
relations_to_create = []
write_message("Processing BDR entries of the record ")
recordDocs = BibRecDocs(rec_id) #TODO: check what happens if there is no record yet ! Will the class represent an empty set?
if tuple_list:
for brt in record_get_field_instances(record, 'BDR', ' ', ' '):
relation_id = _get_subfield_value(brt, "r")
bibdoc1_id = None
bibdoc1_name = None
bibdoc1_ver = None
bibdoc1_fmt = None
bibdoc2_id = None
bibdoc2_name = None
bibdoc2_ver = None
bibdoc2_fmt = None
if not relation_id:
bibdoc1_id = _get_subfield_value(brt, "i")
bibdoc1_name = _get_subfield_value(brt, "n")
if bibdoc1_id == None:
if bibdoc1_name == None:
raise StandardError("Incorrect relation. Neither name nor identifier of the first obejct has been specified")
else:
# retrieving the ID based on the document name (inside current record)
# The document is attached to current record.
try:
bibdoc1_id = recordDocs.get_docid(bibdoc1_name)
except:
raise StandardError("BibDoc of a name %s does not exist within a record" % \
(bibdoc1_name, ))
else:
# resolving temporary identifier
bibdoc1_id = resolve_identifier(tmp_ids, bibdoc1_id)
if bibdoc1_name != None:
write_message("Warning: both name and id of the first document of a relation have been specified. Ignoring the name")
bibdoc1_ver = _get_subfield_value(brt, "v")
if not (bibdoc1_ver is None):
bibdoc1_ver = resolve_identifier(tmp_vers, bibdoc1_ver)
bibdoc1_fmt = _get_subfield_value(brt, "f")
bibdoc2_id = _get_subfield_value(brt, "j")
bibdoc2_name = _get_subfield_value(brt, "o")
if bibdoc2_id == None:
if bibdoc2_name == None:
raise StandardError("Incorrect relation. Neither name nor identifier of the second obejct has been specified")
else:
# retrieving the ID based on the document name (inside current record)
# The document is attached to current record.
try:
bibdoc2_id = recordDocs.get_docid(bibdoc2_name)
except:
raise StandardError("BibDoc of a name %s does not exist within a record" % (bibdoc2_name, ))
else:
bibdoc2_id = resolve_identifier(tmp_ids, bibdoc2_id)
if bibdoc2_name != None:
write_message("Warning: both name and id of the first document of a relation have been specified. Ignoring the name")
bibdoc2_ver = _get_subfield_value(brt, "w")
if not (bibdoc2_ver is None):
bibdoc2_ver = resolve_identifier(tmp_vers, bibdoc2_ver)
bibdoc2_fmt = _get_subfield_value(brt, "g")
control_command = _get_subfield_value(brt, "d")
relation_type = _get_subfield_value(brt, "t")
if not relation_type and not relation_id:
raise StandardError("The relation type must be specified")
more_info = _get_subfield_value(brt, "m")
# the relation id might be specified in the case of updating
# MoreInfo table instead of other fields
rel_obj = None
if not relation_id:
rels = BibRelation.get_relations(rel_type = relation_type,
bibdoc1_id = bibdoc1_id,
bibdoc2_id = bibdoc2_id,
bibdoc1_ver = bibdoc1_ver,
bibdoc2_ver = bibdoc2_ver,
bibdoc1_fmt = bibdoc1_fmt,
bibdoc2_fmt = bibdoc2_fmt)
if len(rels) > 0:
rel_obj = rels[0]
relation_id = rel_obj.id
else:
rel_obj = BibRelation(rel_id=relation_id)
relations_to_create.append((relation_id, bibdoc1_id, bibdoc1_ver,
bibdoc1_fmt, bibdoc2_id, bibdoc2_ver,
bibdoc2_fmt, relation_type, more_info,
rel_obj, control_command))
record_delete_field(record, 'BDR', ' ', ' ')
if mode in ("insert", "replace_or_insert", "append", "correct", "replace"):
# now creating relations between objects based on the data
if not pretend:
for (relation_id, bibdoc1_id, bibdoc1_ver, bibdoc1_fmt,
bibdoc2_id, bibdoc2_ver, bibdoc2_fmt, rel_type,
more_info, rel_obj, control_command) in relations_to_create:
if rel_obj == None:
rel_obj = BibRelation.create(bibdoc1_id = bibdoc1_id,
bibdoc1_ver = bibdoc1_ver,
bibdoc1_fmt = bibdoc1_fmt,
bibdoc2_id = bibdoc2_id,
bibdoc2_ver = bibdoc2_ver,
bibdoc2_fmt = bibdoc2_fmt,
rel_type = rel_type)
relation_id = rel_obj.id
if mode in ("replace"):
# Clearing existing MoreInfo content
rel_obj.get_more_info().delete()
if more_info:
MoreInfo.create_from_serialised(more_info, relation = relation_id)
if control_command == "DELETE":
rel_obj.delete()
else:
write_message("BDR tag is not processed in the %s mode" % (mode, ))
return record
def elaborate_fft_tags(record, rec_id, mode, pretend=False,
tmp_ids = {}, tmp_vers = {}):
"""
Process FFT tags that should contain $a with file paths or URLs
to get the fulltext from. This function enriches record with
proper 8564 URL tags, downloads fulltext files and stores them
into var/data structure where appropriate.
CFG_BIBUPLOAD_WGET_SLEEP_TIME defines time to sleep in seconds in
between URL downloads.
Note: if an FFT tag contains multiple $a subfields, we upload them
into different 856 URL tags in the metadata. See regression test
case test_multiple_fft_insert_via_http().
"""
# Let's define some handy sub procedure.
def _add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, modification_date, pretend=False):
"""Adds a new format for a given bibdoc. Returns True when everything's fine."""
write_message('Add new format to %s url: %s, format: %s, docname: %s, doctype: %s, newname: %s, description: %s, comment: %s, flags: %s, modification_date: %s' % (repr(bibdoc), url, docformat, docname, doctype, newname, description, comment, flags, modification_date), verbose=9)
try:
if not url: # Not requesting a new url. Just updating comment & description
return _update_description_and_comment(bibdoc, docname, docformat, description, comment, flags, pretend=pretend)
try:
if not pretend:
bibdoc.add_file_new_format(url, description=description, comment=comment, flags=flags, modification_date=modification_date)
except StandardError, e:
write_message("('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s') not inserted because format already exists (%s)." % (url, docformat, docname, doctype, newname, description, comment, flags, modification_date, e), stream=sys.stderr)
raise
except Exception, e:
write_message("Error in adding '%s' as a new format because of: %s" % (url, e), stream=sys.stderr)
raise
return True
def _add_new_version(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, modification_date, pretend=False):
"""Adds a new version for a given bibdoc. Returns True when everything's fine."""
write_message('Add new version to %s url: %s, format: %s, docname: %s, doctype: %s, newname: %s, description: %s, comment: %s, flags: %s' % (repr(bibdoc), url, docformat, docname, doctype, newname, description, comment, flags))
try:
if not url:
return _update_description_and_comment(bibdoc, docname, docformat, description, comment, flags, pretend=pretend)
try:
if not pretend:
bibdoc.add_file_new_version(url, description=description, comment=comment, flags=flags, modification_date=modification_date)
except StandardError, e:
write_message("('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s') not inserted because '%s'." % (url, docformat, docname, doctype, newname, description, comment, flags, modification_date, e), stream=sys.stderr)
raise
except Exception, e:
write_message("Error in adding '%s' as a new version because of: %s" % (url, e), stream=sys.stderr)
raise
return True
def _update_description_and_comment(bibdoc, docname, docformat, description, comment, flags, pretend=False):
"""Directly update comments and descriptions."""
write_message('Just updating description and comment for %s with format %s with description %s, comment %s and flags %s' % (docname, docformat, description, comment, flags), verbose=9)
try:
if not pretend:
bibdoc.set_description(description, docformat)
bibdoc.set_comment(comment, docformat)
for flag in CFG_BIBDOCFILE_AVAILABLE_FLAGS:
if flag in flags:
bibdoc.set_flag(flag, docformat)
else:
bibdoc.unset_flag(flag, docformat)
except StandardError, e:
write_message("('%s', '%s', '%s', '%s', '%s') description and comment not updated because '%s'." % (docname, docformat, description, comment, flags, e))
raise
return True
def _process_document_moreinfos(more_infos, docname, version, docformat, mode):
if mode not in ('correct', 'append', 'replace_or_insert', 'replace', 'insert'):
print "exited because the mode is incorrect"
return
brd = BibRecDocs(rec_id)
docid = None
try:
docid = brd.get_docid(docname)
except:
raise StandardError("MoreInfo: No document of a given name associated with the record")
if not version:
# We have to retrieve the most recent version ...
version = brd.get_bibdoc(docname).get_latest_version()
doc_moreinfo_s, version_moreinfo_s, version_format_moreinfo_s, format_moreinfo_s = more_infos
if mode in ("replace", "replace_or_insert"):
if doc_moreinfo_s: #only if specified, otherwise do not touch
MoreInfo(docid = docid).delete()
if format_moreinfo_s: #only if specified... otherwise do not touch
MoreInfo(docid = docid, docformat = docformat).delete()
if not doc_moreinfo_s is None:
MoreInfo.create_from_serialised(ser_str = doc_moreinfo_s, docid = docid)
if not version_moreinfo_s is None:
MoreInfo.create_from_serialised(ser_str = version_moreinfo_s,
docid = docid, version = version)
if not version_format_moreinfo_s is None:
MoreInfo.create_from_serialised(ser_str = version_format_moreinfo_s,
docid = docid, version = version,
docformat = docformat)
if not format_moreinfo_s is None:
MoreInfo.create_from_serialised(ser_str = format_moreinfo_s,
docid = docid, docformat = docformat)
if mode == 'delete':
raise StandardError('FFT tag specified but bibupload executed in --delete mode')
tuple_list = extract_tag_from_record(record, 'FFT')
if tuple_list: # FFT Tags analysis
write_message("FFTs: "+str(tuple_list), verbose=9)
docs = {} # docnames and their data
for fft in record_get_field_instances(record, 'FFT', ' ', ' '):
# First of all, we retrieve the potentially temporary identifiers...
# even if the rest fails, we should include them in the dictionary
version = _get_subfield_value(fft, 'v', '')
# checking if version is temporary... if so, filling a different variable
is_tmp_ver, bibdoc_tmpver = parse_identifier(version)
if is_tmp_ver:
version = None
else:
bibdoc_tmpver = None
if not version: #treating cases of empty string etc...
version = None
bibdoc_tmpid = field_get_subfield_values(fft, 'i')
if bibdoc_tmpid:
bibdoc_tmpid = bibdoc_tmpid[0]
else:
pass  # no temporary identifier subfield supplied; bibdoc_tmpid stays an empty list
is_tmp_id, bibdoc_tmpid = parse_identifier(bibdoc_tmpid)
if not is_tmp_id:
bibdoc_tmpid = None
# In the case of temporary ids, we don't resolve them yet but signal that they have been used.
# The value -1 means that the identifier has been declared but not assigned a value yet.
if bibdoc_tmpid:
if bibdoc_tmpid in tmp_ids:
write_message("WARNING: the temporary identifier %s has been declared more than once. Ignoring the second occurance" % (bibdoc_tmpid, ))
else:
tmp_ids[bibdoc_tmpid] = -1
if bibdoc_tmpver:
if bibdoc_tmpver in tmp_vers:
write_message("WARNING: the temporary version identifier %s has been declared more than once. Ignoring the second occurance" % (bibdoc_tmpver, ))
else:
tmp_vers[bibdoc_tmpver] = -1
# Let's discover the type of the document
# This is a legacy field and no particular
# check is enforced on it.
doctype = _get_subfield_value(fft, 't', 'Main') #Default is Main
# Let's discover the url.
url = field_get_subfield_values(fft, 'a')
# This module is intended to be used by the build/installation scripts of
# extension modules created with SIP. It provides information about file
# locations, version numbers etc., and provides some classes and functions.
#
# Copyright (c) 2014 Riverbank Computing Limited <info@riverbankcomputing.com>
#
# This file is part of SIP.
#
# This copy of SIP is licensed for use under the terms of the SIP License
# Agreement. See the file LICENSE for more details.
#
# This copy of SIP may also be used under the terms of the GNU General Public
# License v2 or v3 as published by the Free Software Foundation which can be
# found in the files LICENSE-GPL2 and LICENSE-GPL3 included in this package.
#
# SIP is supplied WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
import sys
import os
import stat
import string
import re
# These are installation specific values created when SIP was configured.
_pkg_config = {
'arch': '',
'default_bin_dir': '/usr/bin',
'default_mod_dir': '/usr/lib/python2.7/dist-packages',
'default_sip_dir': '/usr/share/sip',
'deployment_target': '',
'platform': 'linux-g++',
'py_conf_inc_dir': '/usr/include/python2.7',
'py_inc_dir': '/usr/include/python2.7',
'py_lib_dir': '/usr/lib/python2.7/config',
'py_version': 0x020708,
'qt_framework': 0,
'sip_bin': '/usr/bin/sip',
'sip_config_args': '-d /usr/lib/python2.7/dist-packages -u STRIP= CFLAGS= -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -D_FORTIFY_SOURCE=2 CFLAGS_RELEASE= CXXFLAGS= -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -D_FORTIFY_SOURCE=2 CXXFLAGS_RELEASE= LFLAGS= -Wl,-z,relro LFLAGS_RELEASE=',
'sip_inc_dir': '/usr/include/python2.7',
'sip_mod_dir': '/usr/lib/python2.7/dist-packages',
'sip_version': 0x041004,
'sip_version_str': '4.16.4',
'universal': ''
}
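# Illustrative helper (not part of the original configuration): the
# *_version entries above are packed as 0xMMNNPP, so 0x020708 is Python
# 2.7.8 and 0x041004 is SIP 4.16.4. The helper name is hypothetical.
def _example_unpack_version(version=0x020708):
    """Return (major, minor, patch) for a version packed as 0xMMNNPP."""
    return (version >> 16, (version >> 8) & 0xff, version & 0xff)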
_default_macros = {
'AIX_SHLIB': '',
'AR': 'ar cqs',
'CC': 'gcc',
'CFLAGS': ' -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -D_FORTIFY_SOURCE=2',
'CFLAGS_APP': '',
'CFLAGS_CONSOLE': '',
'CFLAGS_DEBUG': '-g',
'CFLAGS_EXCEPTIONS_OFF': '',
'CFLAGS_EXCEPTIONS_ON': '',
'CFLAGS_MT': '',
'CFLAGS_MT_DBG': '',
'CFLAGS_MT_DLL': '',
'CFLAGS_MT_DLLDBG': '',
'CFLAGS_RELEASE': '',
'CFLAGS_RTTI_OFF': '',
'CFLAGS_RTTI_ON': '',
'CFLAGS_SHLIB': '-fPIC',
'CFLAGS_STL_OFF': '',
'CFLAGS_STL_ON': '',
'CFLAGS_THREAD': '-D_REENTRANT',
'CFLAGS_WARN_OFF': '-w',
'CFLAGS_WARN_ON': '-Wall -W',
'CHK_DIR_EXISTS': 'test -d',
'CONFIG': 'qt warn_on release incremental link_prl',
'COPY': 'cp -f',
'CXX': 'g++',
'CXXFLAGS': ' -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -D_FORTIFY_SOURCE=2',
'CXXFLAGS_APP': '',
'CXXFLAGS_CONSOLE': '',
'CXXFLAGS_DEBUG': '-g',
'CXXFLAGS_EXCEPTIONS_OFF': '',
'CXXFLAGS_EXCEPTIONS_ON': '',
'CXXFLAGS_MT': '',
'CXXFLAGS_MT_DBG': '',
'CXXFLAGS_MT_DLL': '',
'CXXFLAGS_MT_DLLDBG': '',
'CXXFLAGS_RELEASE': '',
'CXXFLAGS_RTTI_OFF': '',
'CXXFLAGS_RTTI_ON': '',
'CXXFLAGS_SHLIB': '-fPIC',
'CXXFLAGS_STL_OFF': '',
'CXXFLAGS_STL_ON': '',
'CXXFLAGS_THREAD': '-D_REENTRANT',
'CXXFLAGS_WARN_OFF': '-w',
'CXXFLAGS_WARN_ON': '-Wall -W',
'DEFINES': '',
'DEL_FILE': 'rm -f',
'EXTENSION_PLUGIN': '',
'EXTENSION_SHLIB': '',
'INCDIR': '',
'INCDIR_OPENGL': '/usr/X11R6/include',
'INCDIR_X11': '/usr/X11R6/include',
'LFLAGS': ' -Wl,-z,relro',
'LFLAGS_CONSOLE': '',
'LFLAGS_CONSOLE_DLL': '',
'LFLAGS_DEBUG': '',
'LFLAGS_DLL': '',
'LFLAGS_OPENGL': '',
'LFLAGS_PLUGIN': '-shared',
'LFLAGS_RELEASE': '',
'LFLAGS_RPATH': '',
'LFLAGS_SHLIB': '-shared',
'LFLAGS_SONAME': '-Wl,-soname,',
'LFLAGS_THREAD': '',
'LFLAGS_WINDOWS': '',
'LFLAGS_WINDOWS_DLL': '',
'LIB': '',
'LIBDIR': '',
'LIBDIR_OPENGL': '/usr/X11R6/lib',
'LIBDIR_X11': '/usr/X11R6/lib',
'LIBS': '',
'LIBS_CONSOLE': '',
'LIBS_CORE': '',
'LIBS_GUI': '',
'LIBS_NETWORK': '',
'LIBS_OPENGL': '-lGLU -lGL',
'LIBS_RT': '',
'LIBS_RTMT': '',
'LIBS_THREAD': '-lpthread',
'LIBS_WEBKIT': '',
'LIBS_WINDOWS': '',
'LIBS_X11': '-lXext -lX11 -lm',
'LINK': 'g++',
'LINK_SHLIB': 'g++',
'LINK_SHLIB_CMD': '',
'MAKEFILE_GENERATOR': 'UNIX',
'MKDIR': 'mkdir -p',
'RANLIB': '',
'RPATH': '-Wl,-rpath,',
'STRIP': ''
}
# The stack of configuration dictionaries.
_config_stack = []
class Configuration(object):
"""The class that represents SIP configuration values.
"""
def __init__(self, sub_cfg=None):
"""Initialise an instance of the class.
sub_cfg is the list of sub-class configurations. It should be None
when called normally.
"""
# Find the build macros in the closest imported module from where this
# was originally defined.
self._macros = None
for cls in self.__class__.__mro__:
if cls is object:
continue
mod = sys.modules[cls.__module__]
if hasattr(mod, "_default_macros"):
self._macros = mod._default_macros
break
if sub_cfg:
cfg = sub_cfg
else:
cfg = []
cfg.append(_pkg_config)
global _config_stack
_config_stack = cfg
def __getattr__(self, name):
"""Allow configuration values and user options to be handled as
instance variables.
name is the name of the configuration value or user option.
"""
for cfg in _config_stack:
try:
return cfg[name]
except KeyError:
pass
raise AttributeError("\"%s\" is not a valid configuration value or user option" % name)
def build_macros(self):
"""Return the dictionary of platform specific build macros.
"""
return self._macros
def set_build_macros(self, macros):
"""Set the dictionary of build macros to be use when generating
Makefiles.
macros is the dictionary of platform specific build macros.
"""
self._macros = macros
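# Usage sketch (values taken from the _pkg_config dictionary above):
# configuration values are exposed as attributes through __getattr__, so
# installation details can be read directly from a Configuration instance.
#     cfg = Configuration()
#     cfg.sip_version_str   # '4.16.4'
#     cfg.default_mod_dir   # '/usr/lib/python2.7/dist-packages'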
class _UniqueList:
"""A limited list that ensures all its elements are unique.
"""
def __init__(self, value=None):
"""Initialise the instance.
value is the initial value of the list.
"""
if value is None:
self._list = []
else:
self._list = value
def append(self, value):
"""Append a value to the list if it isn't already present.
value is the value to append.
"""
if value not in self._list:
self._list.append(value)
def lextend(self, value):
"""A normal list extend ignoring the uniqueness.
value is the list of elements to append.
"""
self._list.extend(value)
def extend(self, value):
"""Append each element of a value to a list if it isn't already
present.
value is the list of elements to append.
"""
for el in value:
self.append(el)
def as_list(self):
"""Return the list as a raw list.
"""
return self._list
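# Usage sketch: _UniqueList de-duplicates on append()/extend() while keeping
# insertion order; lextend() deliberately skips the uniqueness check.
#     flags = _UniqueList()
#     flags.extend(["-Wall", "-g", "-Wall"])
#     flags.as_list()   # ['-Wall', '-g']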
class _Macro:
"""A macro that can be manipulated as a list.
"""
def __init__(self, name, value):
"""Initialise the instance.
name is the name of the macro.
value is the initial value of the macro.
"""
self._name = name
self.set(value)
def set(self, value):
"""Explicitly set the value of the macro.
value is the new value. It may be a string, a list of strings or a
_UniqueList instance.
"""
self._macro = []
if isinstance(value, _UniqueList):
value = value.as_list()
if type(value) == list:
self.extend(value)
else:
self.append(value)
def append(self, value):
"""Append a value to the macro.
value is the value to append.
"""
if value:
self._macro.append(value)
def extend(self, value):
"""Append each element of a value to the macro.
value is the list of elements to append.
"""
for el in value:
self.append(el)
def remove(self, value):
"""Remove a value from the macro. It doesn't matter if the value
wasn't present.
value is the value to remove.
"""
try:
self._macro.remove(value)
except:
pass
def as_list(self):
"""Return the macro as a list.
"""
return self._macro
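# Usage sketch: a _Macro behaves like a named list of words; set() accepts a
# string, a list or a _UniqueList, duplicates are allowed, and remove()
# silently ignores missing values.
#     libs = _Macro("LIBS", "-lm")
#     libs.extend(["-lpthread", "-lm"])
#     libs.remove("-lGL")
#     libs.as_list()   # ['-lm', '-lpthread', '-lm']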
class Makefile:
"""The base class for the different types of Makefiles.
"""
def __init__(self, configuration, console=0, qt=0, opengl=0, python=0,
threaded=0, warnings=1, debug=0, dir=None,
makefile="Makefile", installs=None, universal=None,
arch=None, deployment_target=None):
"""Initialise an instance of the target. All the macros are left
unchanged allowing scripts to manipulate them at will.
configuration is the current configuration.
console is set if the target is a console (rather than windows) target.
qt is set if the target uses Qt. For Qt v4 a list of Qt libraries may
be specified and a simple non-zero value implies QtCore and QtGui.
opengl is set if the target uses OpenGL.
python is set if the target #includes Python.h.
debug is set to generate a debugging version of the target.
threaded is set if the target requires thread support. It is
automatically set if the target uses Qt and Qt has thread support
enabled.
warnings is set if compiler warning messages are required.
debug is set if debugging symbols should be generated.
dir is the directory for build files and Makefiles.
makefile is the name of the Makefile.
installs is a list of extra install targets. Each element is a two
part list, the first of which is the source and the second is the
destination. If the source is another list then it is a set of source
files and the destination is a directory.
universal is the name of the SDK if the target is a MacOS/X universal
binary. If it is None then the value is taken from the configuration.
arch is the space separated MacOS/X architectures to build. If it is
None then it is taken from the configuration.
deployment_target is the MacOS/X deployment target. If it is None then it is
taken from the configuration.
"""
if qt:
if not hasattr(configuration, "qt_version"):
error("The target uses Qt but pyqtconfig has not been imported.")
# For Qt v4 interpret Qt support as meaning link against the core
# and GUI libraries (which corresponds to the default qmake
# configuration). Also allow a list of Qt v4 modules to be
# specified.
if configuration.qt_version >= 0x040000:
if type(qt) != list:
qt = ["QtCore", "QtGui"]
self._threaded = configuration.qt_threaded
else:
self._threaded = threaded
self.config = configuration
self.console = console
self._qt = qt
self._opengl = opengl
self._python = python
self._warnings = warnings
self._debug = debug
self._makefile = makefile
self._installs = installs
self._infix = ""
# Make sure the destination directory is an absolute path.
if dir:
self.dir = os.path.abspath(dir)
else:
self.dir = os.path.curdir
# Assume we are building in the source tree.
self._src_dir = self.dir
if universal is None:
self._universal = configuration.universal
else:
self._universal = universal
if arch is None:
self._arch = configuration.arch
else:
self._arch = arch
if deployment_target is None:
self._deployment_target = configuration.deployment_target
else:
self._deployment_target = deployment_target
self._finalised = 0
# Copy the macros and convert them all to instance lists.
macros = configuration.build_macros()
for m in list(macros.keys()):
# Allow the user to override the default.
try:
val = getattr(configuration, m)
except AttributeError:
val = macros[m]
# These require special handling as they are (potentially) a set of
# space separated values rather than a single value that might
# contain spaces.
if m in ("DEFINES", "CONFIG") or m[:6] in ("INCDIR", "LIBDIR"):
val = val.split()
# We also want to treat lists of libraries in the same way so that
# duplicates get eliminated.
if m[:4] == "LIBS":
val = val.split()
self.__dict__[m] = _Macro(m, val)
# This is used to alter the configuration more significantly than can
# be done with just configuration files.
self.generator = self.optional_string("MAKEFILE_GENERATOR", "UNIX")
# These are what configuration scripts normally only need to change.
self.extra_cflags = []
self.extra_cxxflags = []
self.extra_defines = []
self.extra_include_dirs = []
self.extra_lflags = []
self.extra_lib_dirs = []
self.extra_libs = []
# Get these once and make them available to sub-classes.
if sys.platform == "win32":
def_copy = "copy"
def_rm = "del"
def_mkdir = "mkdir"
def_chk_dir_exists = "if not exist"
else:
def_copy = "cp -f"
def_rm = "rm -f"
def_mkdir = "mkdir -p"
def_chk_dir_exists = "test -d"
self.copy = self.optional_string("COPY", def_copy)
self.rm = self.optional_string("DEL_FILE", def_rm)
self.mkdir = self.optional_string("MKDIR", def_mkdir)
self.chkdir = self.optional_string("CHK_DIR_EXISTS", def_chk_dir_exists)
def finalise(self):
"""Finalise the macros by doing any consolidation that isn't specific
to a Makefile.
"""
# Extract the things we might need from the Windows Qt configuration.
# Note that we used to think that if Qt was built with exceptions, RTTI
# and STL support enabled then anything that linked against it also
# needed the same flags. However, detecting this was broken for some
# time and nobody complained. For the moment we'll leave the code in
# but it will never be used.
if self._qt:
wcfg = self.config.qt_winconfig.split()
win_shared = ("shared" in wcfg)
win_exceptions = ("exceptions" in wcfg)
win_rtti = ("rtti" in wcfg)
win_stl = ("stl" in wcfg)
qt_version = self.config.qt_version
else:
win_shared = 1
win_exceptions = 0
win_rtti = 0
win_stl = 0
qt_version = 0
# Get what we are going to transform.
cflags = _UniqueList()
cflags.extend(self.extra_cflags)
cflags.extend(self.optional_list("CFLAGS"))
cxxflags = _UniqueList()
cxxflags.extend(self.extra_cxxflags)
cxxflags.extend(self.optional_list("CXXFLAGS"))
defines = _UniqueList()
defines.extend(self.extra_defines)
defines.extend(self.optional_list("DEFINES"))
incdir = _UniqueList(["."])
incdir.extend(self.extra_include_dirs)
incdir.extend(self.optional_list("INCDIR"))
lflags = _UniqueList()
lflags.extend(self.extra_lflags)
lflags.extend(self.optional_list("LFLAGS"))
libdir = _UniqueList()
libdir.extend(self.extra_lib_dirs)
libdir.extend(self.optional_list("LIBDIR"))
# Handle MacOS/X specific configuration.
if sys.platform == 'darwin':
mac_cflags = []
mac_lflags = []
for a in self._arch.split():
aflag = '-arch ' + a
mac_cflags.append(aflag)
mac_lflags.append(aflag)
if self._universal:
mac_cflags.append('-isysroot %s' % self._universal)
mac_lflags.append('-Wl,-syslibroot,%s' % self._universal)
cflags.lextend(mac_cflags)
cxxflags.lextend(mac_cflags)
lflags.lextend(mac_lflags)
# Don't use a unique list as libraries may need to be searched more
# than once. Also MacOS/X uses the form "-framework lib" so we don't
# want to lose the multiple "-framework".
libs = []
for l in self.extra_libs:
libs.append(self.platform_lib(l))
if self._qt:
libs.extend(self._dependent_libs(l))
libs.extend(self.optional_list("LIBS"))
rpaths = _UniqueList()
for l in self.extra_lib_dirs:
l_dir = os.path.dirname(l)
# This is a hack to ignore PyQt's internal support libraries.
if '/qpy/' in l_dir:
continue
# Ignore relative directories. This is really a hack to handle
# SIP v3 inter-module linking.
if l_dir in ("", ".", ".."):
continue
rpaths.append(l)
if self._python:
incdir.append(self.config.py_inc_dir)
incdir.append(self.config.py_conf_inc_dir)
if sys.platform == "cygwin":
libdir.append(self.config.py_lib_dir)
py_lib = "python%u.%u" % ((self.config.py_version >> 16), ((self.config.py_version >> 8) & 0xff))
libs.append(self.platform_lib(py_lib))
elif sys.platform == "win32":
libdir.append(self.config.py_lib_dir)
py_lib = "python%u%u" % ((self.config.py_version >> 16), ((self.config.py_version >> 8) & 0xff))
# For Borland use the OMF version of the Python library if it
# exists, otherwise assume that Python was built with Borland
# and use the normal library.
if self.generator == "BMAKE":
bpy_lib = py_lib + "_bcpp"
bpy_lib_path = os.path.join(self.config.py_lib_dir, self.platform_lib(bpy_lib))
if os.access(bpy_lib_path, os.F_OK):
py_lib = bpy_lib
if self._debug:
py_lib = py_lib + "_d"
if self.generator != "MINGW":
cflags.append("/D_DEBUG")
cxxflags.append("/D_DEBUG")
libs.append(self.platform_lib(py_lib))
if self.generator in ("MSVC", "MSVC.NET", "MSBUILD", "BMAKE"):
if win_exceptions:
cflags_exceptions = "CFLAGS_EXCEPTIONS_ON"
cxxflags_exceptions = "CXXFLAGS_EXCEPTIONS_ON"
else:
cflags_exceptions = "CFLAGS_EXCEPTIONS_OFF"
cxxflags_exceptions = "CXXFLAGS_EXCEPTIONS_OFF"
cflags.extend(self.optional_list(cflags_exceptions))
cxxflags.extend(self.optional_list(cxxflags_exceptions))
if win_rtti:
cflags_rtti = "CFLAGS_RTTI_ON"
cxxflags_rtti = "CXXFLAGS_RTTI_ON"
else:
cflags_rtti = "CFLAGS_RTTI_OFF"
cxxflags_rtti = "CXXFLAGS_RTTI_OFF"
cflags.extend(self.optional_list(cflags_rtti))
cxxflags.extend(self.optional_list(cxxflags_rtti))
if win_stl:
cflags_stl = "CFLAGS_STL_ON"
cxxflags_stl = "CXXFLAGS_STL_ON"
else:
cflags_stl = "CFLAGS_STL_OFF"
cxxflags_stl = "CXXFLAGS_STL_OFF"
cflags.extend(self.optional_list(cflags_stl))
cxxflags.extend(self.optional_list(cxxflags_stl))
if self._debug:
if win_shared:
cflags_mt = "CFLAGS_MT_DLLDBG"
cxxflags_mt = "CXXFLAGS_MT_DLLDBG"
else:
cflags_mt = "CFLAGS_MT_DBG"
cxxflags_mt = "CXXFLAGS_MT_DBG"
cflags_debug = "CFLAGS_DEBUG"
cxxflags_debug = "CXXFLAGS_DEBUG"
lflags_debug = "LFLAGS_DEBUG"
else:
if win_shared:
cflags_mt = "CFLAGS_MT_DLL"
cxxflags_mt = "CXXFLAGS_MT_DLL"
else:
cflags_mt = "CFLAGS_MT"
cxxflags_mt = "CXXFLAGS_MT"
cflags_debug = "CFLAGS_RELEASE"
cxxflags_debug = "CXXFLAGS_RELEASE"
lflags_debug = "LFLAGS_RELEASE"
if self.generator in ("MSVC", "MSVC.NET", "MSBUILD", "BMAKE"):
if self._threaded:
cflags.extend(self.optional_list(cflags_mt))
cxxflags.extend(self.optional_list(cxxflags_mt))
if self.console:
cflags.extend(self.optional_list("CFLAGS_CONSOLE"))
cxxflags.extend(self.optional_list("CXXFLAGS_CONSOLE"))
cflags.extend(self.optional_list(cflags_debug))
cxxflags.extend(self.optional_list(cxxflags_debug))
lflags.extend(self.optional_list(lflags_debug))
if self._warnings:
cflags_warn = "CFLAGS_WARN_ON"
cxxflags_warn = "CXXFLAGS_WARN_ON"
else:
cflags_warn = "CFLAGS_WARN_OFF"
cxxflags_warn = "CXXFLAGS_WARN_OFF"
cflags.extend(self.optional_list(cflags_warn))
cxxflags.extend(self.optional_list(cxxflags_warn))
if self._threaded:
cflags.extend(self.optional_list("CFLAGS_THREAD"))
cxxflags.extend(self.optional_list("CXXFLAGS_THREAD"))
lflags.extend(self.optional_list("LFLAGS_THREAD"))
if self._qt:
# Get the name of the mkspecs directory.
try:
specd_base = self.config.qt_data_dir
except AttributeError:
specd_base = self.config.qt_dir
mkspecs = os.path.join(specd_base, "mkspecs")
if self.generator != "UNIX" and win_shared:
defines.append("QT_DLL")
if not self._debug:
defines.append("QT_NO_DEBUG")
if qt_version >= 0x040000:
for mod in self._qt:
# Note that qmake doesn't define anything for QtHelp.
if mod == "QtCore":
defines.append("QT_CORE_LIB")
elif mod == "QtDeclarative":
defines.append("QT_DECLARATIVE_LIB")
elif mod == "QtGui":
defines.append("QT_GUI_LIB")
elif mod == "QtMultimedia":
defines.append("QT_MULTIMEDIA_LIB")
elif mod == "QtNetwork":
defines.append("QT_NETWORK_LIB")
elif mod == "QtOpenGL":
defines.append("QT_OPENGL_LIB")
elif mod == "QtScript":
defines.append("QT_SCRIPT_LIB")
elif mod == "QtScriptTools":
defines.append("QT_SCRIPTTOOLS_LIB")
elif mod == "QtSql":
defines.append("QT_SQL_LIB")
elif mod == "QtTest":
defines.append("QT_TEST_LIB")
elif mod == "QtWebKit":
defines.append("QT_WEBKIT_LIB")
elif mod == "QtXml":
defines.append("QT_XML_LIB")
elif mod == "QtXmlPatterns":
defines.append("QT_XMLPATTERNS_LIB")
elif mod == "phonon":
defines.append("QT_PHONON_LIB")
if qt_version >= 0x050000:
if mod == "QtTest":
defines.append("QT_GUI_LIB")
if mod in ("QtSql", "QtTest"):
defines.append("QT_WIDGETS_LIB")
elif self._threaded:
defines.append("QT_THREAD_SUPPORT")
# Handle library directories.
libdir_qt = self.optional_list("LIBDIR_QT")
libdir.extend(libdir_qt)
rpaths.extend(libdir_qt)
if qt_version >= 0x040000:
# Try and read QT_LIBINFIX from qconfig.pri.
qconfig = os.path.join(mkspecs, "qconfig.pri")
self._infix = self._extract_value(qconfig, "QT_LIBINFIX")
# For Windows: the macros that define the dependencies on
# Windows libraries.
wdepmap = {
"QtCore": "LIBS_CORE",
"QtGui": "LIBS_GUI",
"QtNetwork": "LIBS_NETWORK",
"QtOpenGL": "LIBS_OPENGL",
"QtWebKit": "LIBS_WEBKIT"
}
# For Windows: the dependencies between Qt libraries.
qt5_depmap = {
"QtDeclarative": ("QtXmlPatterns", "QtNetwork", "QtSql", "QtScript", "QtWidgets", "QtGui", "QtCore"),
"QtGui": ("QtPrintSupport", "QtWidgets", "QtCore"),
"QtHelp": ("QtNetwork", "QtSql", "QtWidgets", "QtGui", "QtCore"),
"QtMultimedia": ("QtGui", "QtCore"),
"QtNetwork": ("QtCore", ),
"QtOpenGL": ("QtWidgets", "QtGui", "QtCore"),
"QtScript": ("QtCore", ),
"QtScriptTools": ("QtScript", "QtGui", "QtCore"),
"QtSql": ("QtCore", ),
"QtSvg": ("QtXml", "QtWidgets", "QtGui", "QtCore"),
"QtTest": ("QtGui", "QtCore"),
"QtWebKit": ("QtNetwork", "QtWebKitWidgets", "QtWidgets", "QtGui", "QtCore"),
"QtXml": ("QtCore", ),
"QtXmlPatterns": ("QtNetwork", "QtCore"),
"QtDesigner": ("QtGui", "QtCore"),
"QAxContainer": ("Qt5AxBase", "QtWidgets", "QtGui", "QtCore")
}
qt4_depmap = {
"QtAssistant": ("QtNetwork", "QtGui", "QtCore"),
"QtDeclarative": ("QtNetwork", "QtGui", "QtCore"),
"QtGui": ("QtCore", ),
"QtHelp": ("QtSql", "QtGui", "QtCore"),
"QtMultimedia": ("QtGui", "QtCore"),
"QtNetwork": ("QtCore", ),
"QtOpenGL": ("QtGui", "QtCore"),
"QtScript": ("QtCore", ),
"QtScriptTools": ("QtScript", "QtGui", "QtCore"),
"QtSql": ("QtCore", ),
"QtSvg": ("QtXml", "QtGui", "QtCore"),
"QtTest": ("QtGui", "QtCore"),
"QtWebKit": ("QtNetwork", "QtGui", "QtCore"),
"QtXml": ("QtCore", ),
"QtXmlPatterns": ("QtNetwork", "QtCore"),
"phonon": ("QtGui", "QtCore"),
"QtDesigner": ("QtGui", "QtCore"),
"QAxContainer": ("QtGui", "QtCore")
}
if qt_version >= 0x050000:
qt_depmap = qt5_depmap
else:
qt_depmap = qt4_depmap
# The QtSql .prl file doesn't include QtGui as a dependency (at
# least on Linux) so we explicitly set the dependency here for
# everything.
if "QtSql" in self._qt:
if "QtGui" not in self._qt:
self._qt.append("QtGui")
# With Qt v4.2.0, the QtAssistantClient library is now a shared
# library on UNIX. The QtAssistantClient .prl file doesn't
# include QtGui and QtNetwork as dependencies any longer. This
# seems to be a bug in Qt v4.2.0. We explicitly set the
# dependencies here.
if qt_version >= 0x040200 and "QtAssistant" in self._qt:
if "QtGui" not in self._qt:
self._qt.append("QtGui")
if "QtNetwork" not in self._qt:
self._qt.append("QtNetwork")
for mod in self._qt:
lib = self._qt_module_to_lib(mod)
libs.append(self.platform_lib(lib, self._is_framework(mod)))
if sys.platform == "win32":
# On Windows the dependent libraries seem to be in
# qmake.conf rather than the .prl file and the
# inter-dependencies between Qt libraries don't seem to
# be anywhere.
deps = _UniqueList()
if mod in list(wdepmap.keys()):
deps.extend(self.optional_list(wdepmap[mod]))
if mod in list(qt_depmap.keys()):
for qdep in qt_depmap[mod]:
# Ignore the dependency if it is explicitly
# linked.
if qdep not in self._qt:
libs.append(self.platform_lib(self._qt_module_to_lib(qdep)))
if qdep in list(wdepmap.keys()):
deps.extend(self.optional_list(wdepmap[qdep]))
libs.extend(deps.as_list())
else:
libs.extend(self._dependent_libs(lib, self._is_framework(mod)))
else:
# Windows needs the version number appended if Qt is a DLL.
qt_lib = self.config.qt_lib
if self.generator in ("MSVC", "MSVC.NET", "MSBUILD", "BMAKE") and win_shared:
qt_lib = qt_lib + version_to_string(qt_version).replace(".", "")
if self.config.qt_edition == "non-commercial":
qt_lib = qt_lib + "nc"
libs.append(self.platform_lib(qt_lib, self.config.qt_framework))
libs.extend(self._dependent_libs(self.config.qt_lib))
# Handle header directories.
specd = os.path.join(mkspecs, "default")
if not os.access(specd, os.F_OK):
specd = os.path.join(mkspecs, self.config.platform)
incdir.append(specd)
qtincdir = self.optional_list("INCDIR_QT")
if qtincdir:
if qt_version >= 0x040000:
for mod in self._qt:
if mod == "QAxContainer":
incdir.append(os.path.join(qtincdir[0], "ActiveQt"))
elif self._is_framework(mod):
idir = libdir_qt[0]
if mod == "QtAssistant" and qt_version < 0x040202:
mod = "QtAssistantClient"
incdir.append(os.path.join(idir,
mod + ".framework", "Headers"))
if qt_version >= 0x050000:
if mod == "QtGui":
incdir.append(os.path.join(idir,
"QtWidgets.framework", "Headers"))
incdir.append(os.path.join(idir,
"QtPrintSupport.framework",
"Headers"))
elif mod == "QtWebKit":
incdir.append(os.path.join(idir,
"QtWebKitWidgets.framework",
"Headers"))
else:
idir = qtincdir[0]
incdir.append(os.path.join(idir, mod))
if qt_version >= 0x050000:
if mod == "QtGui":
incdir.append(os.path.join(idir,
"QtWidgets"))
incdir.append(os.path.join(idir,
"QtPrintSupport"))
elif mod == "QtWebKit":
incdir.append(os.path.join(idir,
"QtWebKitWidgets"))
# This must go after the module include directories.
incdir.extend(qtincdir)
if self._opengl:
incdir.extend(self.optional_list("INCDIR_OPENGL"))
lflags.extend(self.optional_list("LFLAGS_OPENGL"))
libdir.extend(self.optional_list("LIBDIR_OPENGL"))
libs.extend(self.optional_list("LIBS_OPENGL"))
if self._qt or self._opengl:
if qt_version < 0x040000 or self._opengl or "QtGui" in self._qt:
incdir.extend(self.optional_list("INCDIR_X11"))
libdir.extend(self.optional_list("LIBDIR_X11"))
libs.extend(self.optional_list("LIBS_X11"))
if self._threaded:
libs.extend(self.optional_list("LIBS_THREAD"))
libs.extend(self.optional_list("LIBS_RTMT"))
else:
libs.extend(self.optional_list("LIBS_RT"))
if self.console:
libs.extend(self.optional_list("LIBS_CONSOLE"))
libs.extend(self.optional_list("LIBS_WINDOWS"))
# Don't append any rpaths
#lflags.extend(self._platform_rpaths(rpaths.as_list()))
# Save the transformed values.
self.CFLAGS.set(cflags)
self.CXXFLAGS.set(cxxflags)
self.DEFINES.set(defines)
self.INCDIR.set(incdir)
self.LFLAGS.set(lflags)
self.LIBDIR.set(libdir)
self.LIBS.set(libs)
# Don't do it again because it has side effects.
self._finalised = 1
def _add_manifest(self, target=None):
"""Add the link flags for creating a manifest file.
"""
if target is None:
target = "$(TARGET)"
self.LFLAGS.append("/MANIFEST")
self.LFLAGS.append("/MANIFESTFILE:%s.manifest" % target)
def _is_framework(self, mod):
"""Return true if the given Qt module is a framework.
"""
return (self.config.qt_framework and (self.config.qt_version >= 0x040200 or mod != "QtAssistant"))
def _qt_module_to_lib(self, mname):
"""Return the name of the Qt library corresponding to a module.
mname is the name of the module.
"""
qt_version = self.config.qt_version
if mname == "QtAssistant":
if qt_version >= 0x040202 and sys.platform == "darwin":
lib = mname
else:
lib = "QtAssistantClient"
else:
lib = mname
lib += self._infix
if self._debug:
if sys.platform == "win32":
lib = lib + "d"
elif sys.platform == "darwin":
if not self._is_framework(mname):
lib = lib + "_debug"
elif qt_version < 0x040200:
lib = lib + "_debug"
qt5_rename = False
if sys.platform == "win32" and "shared" in self.config.qt_winconfig.split():
if (mname in ("QtCore", "QtDeclarative", "QtDesigner", "QtGui",
"QtHelp", "QtMultimedia", "QtNetwork", "QtOpenGL",
"QtScript", "QtScriptTools", "QtSql", "QtSvg",
"QtTest", "QtWebKit", "QtXml", "QtXmlPatterns",
"phonon", "QAxContainer", "QtPrintSupport",
"QtWebKitWidgets", "QtWidgets") or
(qt_version >= 0x040200 and mname == "QtAssistant")):
if mname == "QAxContainer":
if qt_version >= 0x050000:
lib = "Qt5" + lib[1:]
elif qt_version >= 0x050000:
qt5_rename = True
else:
lib = lib + "4"
elif sys.platform.startswith("linux") and qt_version >= 0x050000:
qt5_rename = True
if qt5_rename:
lib = "Qt5" + lib[2:]
return lib
def optional_list(self, name):
"""Return an optional Makefile macro as a list.
name is the name of the macro.
"""
return self.__dict__[name].as_list()
def optional_string(self, name, default=""):
"""Return an optional Makefile macro as a string.
name is the name of the macro.
default is the default value
"""
s = ' '.join(self.optional_list(name))
if not s:
s = default
return s
def required_string(self, name):
"""Return a required Makefile macro as a string.
name is the name of the macro.
"""
s = self.optional_string(name)
if not s:
raise ValueError("\"%s\" must have a non-empty value" % name)
return s
def _platform_rpaths(self, rpaths):
"""Return a list of platform specific rpath flags.
rpaths is the canonical list of rpaths.
"""
flags = []
prefix = self.optional_string("RPATH")
if prefix == "":
# This was renamed in Qt v4.7.
prefix = self.optional_string("LFLAGS_RPATH")
if prefix != "":
for r in rpaths:
flags.append(_quote(prefix + r))
return flags
def platform_lib(self, clib, framework=0):
"""Return a library name in platform specific form.
clib is the library name in canonical form.
framework is set if the library is implemented as a MacOS framework.
"""
if self.generator in ("MSVC", "MSVC.NET", "MSBUILD", "BMAKE"):
plib = clib + ".lib"
elif sys.platform == "darwin" and framework:
plib = "-framework " + clib
else:
plib = "-l" + clib
return plib
def _dependent_libs(self, clib, framework=0):
"""Return a list of additional libraries (in platform specific form)
that must be linked with a library.
clib is the library name in canonical form.
framework is set if the library is implemented as a MacOS framework.
"""
##################################################################
# Generally, the linker is intelligent enough not to need this #
# additional information! #
# And Qt4's pkg-config and prl files are broken #
# Changed for Debian packaging, Torsten Marek <shlomme@gmx.net> #
##################################################################
return []
if self.generator in ("MSVC", "MSVC.NET", "MSBUILD", "BMAKE"):
prl_name = os.path.join(self.config.qt_lib_dir, clib + ".prl")
elif sys.platform == "darwin" and framework:
prl_name = os.path.join(self.config.qt_lib_dir, clib + ".framework", clib + ".prl")
else:
prl_name = os.path.join(self.config.qt_lib_dir, "lib" + clib + ".prl")
libs = self._extract_value(prl_name, "QMAKE_PRL_LIBS").split()
if self.config.qt_version >= 0x050000:
xtra_libs = []
if clib in ("QtGui", "Qt5Gui"):
xtra_libs.append("QtWidgets")
xtra_libs.append("QtPrintSupport")
elif clib in ("QtWebKit", "Qt5WebKit"):
xtra_libs.append("QtWebKitWidgets")
for xtra in xtra_libs:
libs.extend(
self.platform_lib(
self._qt_module_to_lib(xtra), framework).split())
return libs
def _extract_value(self, fname, vname):
"""Return the stripped value from a name=value line in a file.
fname is the name of the file.
vname is the name of the value.
"""
value = ""
if os.access(fname, os.F_OK):
try:
f = open(fname, "r")
except IOError:
error("Unable to open \"%s\"" % fname)
line = f.readline()
while line:
line = line.strip()
if line and line[0] != "#":
eq = line.find("=")
if eq > 0 and line[:eq].strip() == vname:
value = line[eq + 1:].strip()
break
line = f.readline()
f.close()
return value
def parse_build_file(self, filename):
"""
Parse a build file and return the corresponding dictionary.
filename is the name of the build file. If it is a dictionary instead
then its contents are validated.
"""
if type(filename) == dict:
bfname = "dictionary"
bdict = filename
else:
if os.path.isabs(filename):
# We appear to be building out of the source tree.
self._src_dir = os.path.dirname(filename)
bfname = filename
else:
bfname = os.path.join(self.dir, filename)
bdict = {}
try:
f = open(bfname, "r")
except IOError:
error("Unable to open \"%s\"" % bfname)
line_nr = 1
line = f.readline()
while line:
line = line.strip()
if line and line[0] != "#":
eq = line.find("=")
if eq <= 0:
error("\"%s\" line %d: Line must be in the form 'name = value value...'." % (bfname, line_nr))
bdict[line[:eq].strip()] = line[eq + 1:].strip()
line_nr = line_nr + 1
line = f.readline()
f.close()
# Check the compulsory values.
for i in ("target", "sources"):
try:
bdict[i]
except KeyError:
error("\"%s\" is missing from \"%s\"." % (i, bfname))
# Get the optional values.
for i in ("headers", "moc_headers"):
try:
bdict[i]
except KeyError:
bdict[i] = ""
# Generate the list of objects.
if self.generator in ("MSVC", "MSVC.NET", "MSBUILD", "BMAKE"):
ext = ".obj"
else:
ext = ".o"
olist = []
for f in bdict["sources"].split():
root, discard = os.path.splitext(f)
olist.append(root + ext)
for f in bdict["moc_headers"].split():
if not self._qt:
error("\"%s\" defines \"moc_headers\" for a non-Qt module." % bfname)
root, discard = os.path.splitext(f)
olist.append("moc_" + root + ext)
bdict["objects"] = ' '.join(olist)
return bdict
def clean_build_file_objects(self, mfile, build):
"""Generate the clean target.
mfile is the file object.
build is the dictionary created from the build file.
"""
mfile.write("\t-%s $(TARGET)\n" % self.rm)
for f in build["objects"].split():
mfile.write("\t-%s %s\n" % (self.rm, f))
for f in build["moc_headers"].split():
root, discard = os.path.splitext(f)
mfile.write("\t-%s moc_%s.cpp\n" % (self.rm, root))
def ready(self):
"""The Makefile is now ready to be used.
"""
if not self._finalised:
self.finalise()
def generate(self):
"""Generate the Makefile.
"""
self.ready()
# Make sure the destination directory exists.
try:
os.makedirs(self.dir)
except:
pass
mfname = os.path.join(self.dir, self._makefile)
try:
mfile = open(mfname, "w")
except IOError:
error("Unable to create \"%s\"" % mfname)
self.generate_macros_and_rules(mfile)
self.generate_target_default(mfile)
self.generate_target_install(mfile)
if self._installs:
if type(self._installs) != list:
self._installs = [self._installs]
for src, dst in self._installs:
self.install_file(mfile, src, dst)
self.generate_target_clean(mfile)
mfile.close()
def generate_macros_and_rules(self, mfile):
"""The default implementation of the macros and rules generation.
mfile is the file object.
"""
if self._deployment_target:
mfile.write("export MACOSX_DEPLOYMENT_TARGET = %s\n" % self._deployment_target)
mfile.write("CC = %s\n" % self.required_string("CC"))
mfile.write("CXX = %s\n" % self.required_string("CXX"))
mfile.write("LINK = %s\n" % self.required_string("LINK"))
cppflags = []
if not self._debug:
cppflags.append("-DNDEBUG")
for f in self.optional_list("DEFINES"):
cppflags.append("-D" + f)
for f in self.optional_list("INCDIR"):
cppflags.append("-I" + _quote(f))
libs = []
if self.generator in ("MSVC", "MSVC.NET", "MSBUILD"):
libdir_prefix = "/LIBPATH:"
else:
libdir_prefix = "-L"
for ld in self.optional_list("LIBDIR"):
if sys.platform == "darwin" and self.config.qt_framework:
fflag = "-F" + _quote(ld)
libs.append(fflag)
cppflags.append(fflag)
libs.append(libdir_prefix + _quote(ld))
libs.extend(self.optional_list("LIBS"))
mfile.write("CPPFLAGS = %s\n" % ' '.join(cppflags))
mfile.write("CFLAGS = %s\n" % self.optional_string("CFLAGS"))
mfile.write("CXXFLAGS = %s\n" % self.optional_string("CXXFLAGS"))
mfile.write("LFLAGS = %s\n" % self.optional_string("LFLAGS"))
mfile.write("LIBS = %s\n" % ' '.join(libs))
if self._qt:
mfile.write("MOC = %s\n" % _quote(self.required_string("MOC")))
if self._src_dir != self.dir:
mfile.write("VPATH = %s\n\n" % self._src_dir)
# These probably don't matter.
if self.generator == "MINGW":
mfile.write(".SUFFIXES: .cpp .cxx .cc .C .c\n\n")
elif self.generator == "UNIX":
mfile.write(".SUFFIXES: .c .o .cpp .cc .cxx .C\n\n")
else:
mfile.write(".SUFFIXES: .c .cpp .cc .cxx .C\n\n")
if self.generator in ("MSVC", "MSVC.NET", "MSBUILD"):
mfile.write("""
{.}.cpp{}.obj::
\t$(CXX) -c $(CXXFLAGS) $(CPPFLAGS) -Fo @<<
\t$<
<<
{.}.cc{}.obj::
\t$(CXX) -c $(CXXFLAGS) $(CPPFLAGS) -Fo @<<
\t$<
<<
{.}.cxx{}.obj::
\t$(CXX) -c $(CXXFLAGS) $(CPPFLAGS) -Fo @<<
\t$<
<<
{.}.C{}.obj::
\t$(CXX) -c $(CXXFLAGS) $(CPPFLAGS) -Fo @<<
\t$<
<<
{.}.c{}.obj::
\t$(CC) -c $(CFLAGS) $(CPPFLAGS) -Fo @<<
\t$<
<<
""")
elif self.generator == "BMAKE":
mfile.write("""
.cpp.obj:
\t$(CXX) -c $(CXXFLAGS) $(CPPFLAGS) -o$@ $<
.cc.obj:
\t$(CXX) -c $(CXXFLAGS) $(CPPFLAGS) -o$@ $<
.cxx.obj:
\t$(CXX) -c $(CXXFLAGS) $(CPPFLAGS) -o$@ $<
.C.obj:
\t$(CXX) -c $(CXXFLAGS) $(CPPFLAGS) -o$@ $<
.c.obj:
\t$(CC) -c $(CFLAGS) $(CPPFLAGS) -o$@ $<
""")
else:
mfile.write("""
.cpp.o:
\t$(CXX) -c $(CXXFLAGS) $(CPPFLAGS) -o $@ $<
.cc.o:
\t$(CXX) -c $(CXXFLAGS) $(CPPFLAGS) -o $@ $<
.cxx.o:
\t$(CXX) -c $(CXXFLAGS) $(CPPFLAGS) -o $@ $<
.C.o:
\t$(CXX) -c $(CXXFLAGS) $(CPPFLAGS) -o $@ $<
.c.o:
\t$(CC) -c $(CFLAGS) $(CPPFLAGS) -o $@ $<
""")
def generate_target_default(self, mfile):
"""The default implementation of the default target.
mfile is the file object.
"""
mfile.write("\nall:\n")
def generate_target_install(self, mfile):
"""The default implementation of the install target.
mfile is the file object.
"""
mfile.write("\ninstall:\n")
def generate_target_clean(self, mfile):
"""The default implementation of the clean target.
mfile is the file object.
"""
mfile.write("\nclean:\n")
def install_file(self, mfile, src, dst, strip=0):
"""Install one or more files in a directory.
mfile is the file object.
src is the name of a single file to install, or the list of a number of
files to install.
dst is the name of the destination directory.
strip is set if the files should be stripped after being installed.
"""
# Help package builders.
if self.generator == "UNIX":
dst = "$(DESTDIR)" + dst
mfile.write("\t@%s %s " % (self.chkdir, _quote(dst)))
if self.generator == "UNIX":
mfile.write("|| ")
mfile.write("%s %s\n" % (self.mkdir, _quote(dst)))
if type(src) != list:
src = [src]
# Get the strip command if needed.
if strip:
strip_cmd = self.optional_string("STRIP")
if not strip_cmd:
strip = 0
for sf in src:
target = _quote(os.path.join(dst, os.path.basename(sf)))
mfile.write("\t%s %s %s\n" % (self.copy, _quote(sf), target))
if strip:
mfile.write("\t%s %s\n" % (strip_cmd, target))
class ParentMakefile(Makefile):
"""The class that represents a parent Makefile.
"""
def __init__(self, configuration, subdirs, dir=None, makefile="Makefile",
installs=None):
"""Initialise an instance of a parent Makefile.
subdirs is the sequence of subdirectories.
"""
Makefile.__init__(self, configuration, dir=dir, makefile=makefile, installs=installs)
self._subdirs = subdirs
def generate_macros_and_rules(self, mfile):
"""Generate the macros and rules.
mfile is the file object.
"""
# We don't want them.
pass
def generate_target_default(self, mfile):
"""Generate the default target.
mfile is the file object.
"""
self._subdir_target(mfile)
def generate_target_install(self, mfile):
"""Generate the install target.
mfile is the file object.
"""
self._subdir_target(mfile, "install")
def generate_target_clean(self, mfile):
"""Generate the clean target.
mfile is the file object.
"""
self._subdir_target(mfile, "clean")
def _subdir_target(self, mfile, target="all"):
"""Create a target for a list of sub-directories.
mfile is the file object.
target is the name of the target.
"""
if target == "all":
tname = ""
else:
tname = " " + target
mfile.write("\n" + target + ":\n")
for d in self._subdirs:
if self.generator == "MINGW":
mfile.write("\t@$(MAKE) -C %s%s\n" % (d, tname))
elif self.generator == "UNIX":
mfile.write("\t@(cd %s; $(MAKE)%s)\n" % (d, tname))
else:
mfile.write("\tcd %s\n" % d)
mfile.write("\t$(MAKE)%s\n" % tname)
mfile.write("\t@cd ..\n")
class PythonModuleMakefile(Makefile):
"""The class that represents a Python module Makefile.
"""
def __init__(self, configuration, dstdir, srcdir=None, dir=None,
makefile="Makefile", installs=None):
"""Initialise an instance of a parent Makefile.
dstdir is the name of the directory where the module's Python code will
be installed.
srcdir is the name of the directory (relative to the directory in which
the Makefile will be created) containing the module's Python code. It
defaults to the same directory.
"""
Makefile.__init__(self, configuration, dir=dir, makefile=makefile, installs=installs)
if not srcdir:
srcdir = "."
if dir:
self._moddir = os.path.join(dir, srcdir)
else:
self._moddir = srcdir
self._srcdir = srcdir
self._dstdir = dstdir
def generate_macros_and_rules(self, mfile):
"""Generate the macros and rules.
mfile is the file object.
"""
# We don't want them.
pass
def generate_target_install(self, mfile):
"""Generate the install target.
mfile is the file object.
"""
Makefile.generate_target_install(self, mfile)
for root, dirs, files in os.walk(self._moddir):
# Do not recurse into certain directories.
for skip in (".svn", "CVS"):
if skip in dirs:
dirs.remove(skip)
tail = root[len(self._moddir):]
flist = []
for f in files:
if f == "Makefile":
continue
if os.path.isfile(os.path.join(root, f)):
flist.append(os.path.join(self._srcdir + tail, f))
self.install_file(mfile, flist, self._dstdir + tail)
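# Illustrative usage (an assumption): installing a directory of pure Python
# code.  The package name and destination are made up; "cfg" is again a
# Configuration object.
#
#     PythonModuleMakefile(
#         configuration=cfg,
#         dstdir=os.path.join(cfg.default_mod_dir, "mypackage"),
#         srcdir="mypackage"
#     ).generate()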
class ModuleMakefile(Makefile):
"""The class that represents a Python extension module Makefile
"""
def __init__(self, configuration, build_file, install_dir=None, static=0,
console=0, qt=0, opengl=0, threaded=0, warnings=1, debug=0,
dir=None, makefile="Makefile", installs=None, strip=1,
export_all=0, universal=None, arch=None,
deployment_target=None):
"""Initialise an instance of a module Makefile.
build_file is the file containing the target specific information. If
it is a dictionary instead then its contents are validated.
install_dir is the directory the target will be installed in.
static is set if the module should be built as a static library.
strip is set if the module should be stripped of unneeded symbols when
installed. The default is 1.
export_all is set if all the module's symbols should be exported rather
than just the module's initialisation function. Exporting all symbols
increases the size of the module and slows down module load times but
may avoid problems with modules that use exceptions. The default is 0.
"""
Makefile.__init__(self, configuration, console, qt, opengl, 1, threaded, warnings, debug, dir, makefile, installs, universal, arch, deployment_target)
self._build = self.parse_build_file(build_file)
self._install_dir = install_dir
self.static = static
self._manifest = ("embed_manifest_dll" in self.optional_list("CONFIG"))
# Don't strip or restrict the exports if this is a debug or static
# build.
if debug or static:
self._strip = 0
self._limit_exports = 0
else:
self._strip = strip
self._limit_exports = not export_all
# Save the target name for later.
self._target = self._build["target"]
# The name of the module entry point is Python version specific.
if self.config.py_version >= 0x030000:
self._entry_point = "PyInit_%s" % self._target
else:
self._entry_point = "init%s" % self._target
if sys.platform != "win32" and static:
self._target = "lib" + self._target
if sys.platform == "win32" and debug:
self._target = self._target + "_d"
def finalise(self):
"""Finalise the macros common to all module Makefiles.
"""
if self.console:
lflags_console = "LFLAGS_CONSOLE"
else:
lflags_console = "LFLAGS_WINDOWS"
if self.static:
self.DEFINES.append("SIP_STATIC_MODULE")
else:
self.CFLAGS.extend(self.optional_list("CFLAGS_SHLIB"))
self.CXXFLAGS.extend(self.optional_list("CXXFLAGS_SHLIB"))
lflags_dll = self.optional_list("LFLAGS_DLL")
if lflags_dll:
self.LFLAGS.extend(lflags_dll)
elif self.console:
lflags_console = "LFLAGS_CONSOLE_DLL"
else:
lflags_console = "LFLAGS_WINDOWS_DLL"
if self._manifest:
self._add_manifest()
# We use this to explicitly create bundles on MacOS. Apple's Python
# can handle extension modules that are bundles or dynamic
# libraries, but python.org versions need bundles (unless built
# with DYNLOADFILE=dynload_shlib.o).
if sys.platform == "darwin":
lflags_plugin = ["-bundle"]
else:
lflags_plugin = self.optional_list("LFLAGS_PLUGIN")
if not lflags_plugin:
lflags_plugin = self.optional_list("LFLAGS_SHLIB")
self.LFLAGS.extend(lflags_plugin)
self.LFLAGS.extend(self.optional_list(lflags_console))
if sys.platform == "darwin":
self.LFLAGS.append("-undefined dynamic_lookup")
Makefile.finalise(self)
if not self.static:
if self.optional_string("AIX_SHLIB"):
# AIX needs a lot of special handling.
if self.required_string('LINK') == 'g++':
# g++ is used for linking.
# For SIP v4 and g++:
# 1.) Import the python symbols
aix_lflags = ['-Wl,-bI:%s/python.exp' % self.config.py_lib_dir]
if self._limit_exports:
aix_lflags.append('-Wl,-bnoexpall')
aix_lflags.append('-Wl,-bnoentry')
aix_lflags.append('-Wl,-bE:%s.exp' % self._target)
else:
# IBM VisualAge C++ is used for linking.
# For SIP v4 and xlC:
# 1.) Create a shared object
# 2.) Import the python symbols
aix_lflags = ['-qmkshrobj',
'-bI:%s/python.exp' % self.config.py_lib_dir]
if self._limit_exports:
aix_lflags.append('-bnoexpall')
aix_lflags.append('-bnoentry')
aix_lflags.append('-bE:%s.exp' % self._target)
self.LFLAGS.extend(aix_lflags)
else:
if self._limit_exports:
if sys.platform[:5] == 'linux':
self.LFLAGS.extend(['-Wl,--version-script=%s.exp' % self._target])
elif sys.platform[:5] == 'sunos':
if self.required_string('LINK') == 'g++':
self.LFLAGS.extend(['-Wl,-z,noversion', '-Wl,-M,%s.exp' % self._target])
else:
self.LFLAGS.extend(['-z', 'noversion', '-M', '%s.exp' % self._target])
elif sys.platform[:5] == 'hp-ux':
self.LFLAGS.extend(['-Wl,+e,%s' % self._entry_point])
elif sys.platform[:5] == 'irix' and self.required_string('LINK') != 'g++':
# Doesn't work when g++ is used for linking on IRIX.
self.LFLAGS.extend(['-Wl,-exported_symbol,%s' % self._entry_point])
# Force the shared linker if there is one.
link_shlib = self.optional_list("LINK_SHLIB")
if link_shlib:
self.LINK.set(link_shlib)
# This made an appearance in Qt v4.4rc1 and breaks extension modules so
# remove it. It was removed at my request but some stupid distros may
# have kept it.
self.LFLAGS.remove('-Wl,--no-undefined')
def module_as_lib(self, mname):
"""Return the name of a SIP v3.x module when it is used as a library.
This will raise an exception when used with SIP v4.x modules.
mname is the name of the module.
"""
raise ValueError("module_as_lib() can only be used with SIP v3.x")
def generate_macros_and_rules(self, mfile):
"""Generate the macros and rules generation.
mfile is the file object.
"""
if self.static:
if sys.platform == "win32":
ext = "lib"
else:
ext = "a"
else:
if sys.platform == "win32":
ext = "pyd"
elif sys.platform == "darwin":
ext = "so"
elif sys.platform == "cygwin":
ext = "dll"
else:
ext = self.optional_string("EXTENSION_PLUGIN")
if not ext:
ext = self.optional_string("EXTENSION_SHLIB", "so")
mfile.write("TARGET = %s\n" % (self._target + "." + ext))
mfile.write("OFILES = %s\n" % self._build["objects"])
mfile.write("HFILES = %s %s\n" % (self._build["headers"], self._build["moc_headers"]))
mfile.write("\n")
if self.static:
if self.generator in ("MSVC", "MSVC.NET", "MSBUILD", "BMAKE"):
mfile.write("LIB = %s\n" % self.required_string("LIB"))
elif self.generator == "MINGW":
mfile.write("AR = %s\n" % self.required_string("LIB"))
self._ranlib = None
else:
mfile.write("AR = %s\n" % self.required_string("AR"))
self._ranlib = self.optional_string("RANLIB")
if self._ranlib:
mfile.write("RANLIB = %s\n" % self._ranlib)
Makefile.generate_macros_and_rules(self, mfile)
def generate_target_default(self, mfile):
"""Generate the default target.
mfile is the file object.
"""
# Do these first so that it's safe for a sub-class to append additional
# commands to the real target, but make sure the default is correct.
mfile.write("\nall: $(TARGET)\n")
mfile.write("\n$(OFILES): $(HFILES)\n")
for mf in self._build["moc_headers"].split():
root, _ = os.path.splitext(mf)
cpp = "moc_" + root + ".cpp"
if self._src_dir != self.dir:
mf = os.path.join(self._src_dir, mf)
mfile.write("\n%s: %s\n" % (cpp, mf))
mfile.write("\t$(MOC) -o %s %s\n" % (cpp, mf))
mfile.write("\n$(TARGET): $(OFILES)\n")
if self.generator in ("MSVC", "MSVC.NET", "MSBUILD"):
if self.static:
mfile.write("\t$(LIB) /OUT:$(TARGET) @<<\n")
mfile.write("\t $(OFILES)\n")
mfile.write("<<\n")
else:
mfile.write("\t$(LINK) $(LFLAGS) /OUT:$(TARGET) @<<\n")
mfile.write("\t $(OFILES) $(LIBS)\n")
mfile.write("<<\n")
if self._manifest:
mfile.write("\tmt -nologo -manifest $(TARGET).manifest -outputresource:$(TARGET);2\n")
elif self.generator == "BMAKE":
if self.static:
mfile.write("\t-%s $(TARGET)\n" % (self.rm))
mfile.write("\t$(LIB) $(TARGET) @&&|\n")
for of in self._build["objects"].split():
mfile.write("+%s \\\n" % (of))
mfile.write("|\n")
else:
mfile.write("\t$(LINK) @&&|\n")
mfile.write("\t$(LFLAGS) $(OFILES) ,$(TARGET),,$(LIBS),%s\n" % (self._target))
mfile.write("|\n")
# Create the .def file that renames the entry point.
defname = os.path.join(self.dir, self._target + ".def")
try:
dfile = open(defname, "w")
except IOError:
error("Unable to create \"%s\"" % defname)
dfile.write("EXPORTS\n")
dfile.write("%s=_%s\n" % (self._entry_point, self._entry_point))
dfile.close()
else:
if self.static:
mfile.write("\t-%s $(TARGET)\n" % self.rm)
mfile.write("\t$(AR) $(TARGET) $(OFILES)\n")
if self._ranlib:
mfile.write("\t$(RANLIB) $(TARGET)\n")
else:
if self._limit_exports:
# Create an export file for AIX, Linux and Solaris.
if sys.platform[:5] == 'linux':
mfile.write("\t@echo '{ global: %s; local: *; };' > %s.exp\n" % (self._entry_point, self._target))
elif sys.platform[:5] == 'sunos':
mfile.write("\t@echo '{ global: %s; local: *; };' > %s.exp\n" % (self._entry_point, self._target))
elif sys.platform[:3] == 'aix':
mfile.write("\t@echo '#!' >%s.exp" % self._target)
mfile.write("; \\\n\t echo '%s' >>%s.exp\n" % (self._entry_point, self._target))
mfile.write("\t$(LINK) $(LFLAGS) -o $(TARGET) $(OFILES) $(LIBS)\n")
def generate_target_install(self, mfile):
"""Generate the install target.
mfile is the file object.
"""
if self._install_dir is None:
self._install_dir = self.config.default_mod_dir
mfile.write("\ninstall: $(TARGET)\n")
self.install_file(mfile, "$(TARGET)", self._install_dir, self._strip)
def generate_target_clean(self, mfile):
"""Generate the clean target.
mfile is the file object.
"""
mfile.write("\nclean:\n")
self.clean_build_file_objects(mfile, self._build)
if self._manifest and not self.static:
mfile.write("\t-%s $(TARGET).manifest\n" % self.rm)
# Remove any export file on AIX, Linux and Solaris.
if self._limit_exports and (sys.platform[:5] == 'linux' or
sys.platform[:5] == 'sunos' or
sys.platform[:3] == 'aix'):
mfile.write("\t-%s %s.exp\n" % (self.rm, self._target))
class SIPModuleMakefile(ModuleMakefile):
"""The class that represents a SIP generated module Makefile.
"""
def __init__(self, configuration, build_file, install_dir=None, static=0,
console=0, qt=0, opengl=0, threaded=0, warnings=1, debug=0,
dir=None, makefile="Makefile", installs=None, strip=1,
export_all=0, universal=None, arch=None, prot_is_public=0,
deployment_target=None):
"""Initialise an instance of a SIP generated module Makefile.
prot_is_public is set if "protected" is to be redefined as "public".
If the platform's C++ ABI allows it this can significantly reduce the
size of the generated code.
For all other arguments see ModuleMakefile.
"""
ModuleMakefile.__init__(self, configuration, build_file, install_dir,
static, console, qt, opengl, threaded, warnings, debug, dir,
makefile, installs, strip, export_all, universal, arch,
deployment_target)
self._prot_is_public = prot_is_public
def finalise(self):
"""Finalise the macros for a SIP generated module Makefile.
"""
if self._prot_is_public:
self.DEFINES.append('SIP_PROTECTED_IS_PUBLIC')
self.DEFINES.append('protected=public')
self.INCDIR.append(self.config.sip_inc_dir)
ModuleMakefile.finalise(self)
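# Illustrative usage (an assumption): the usual pattern in a configure.py for
# a SIP binding - sip has already been run to produce the generated sources
# and the build file named below (the name is made up).
#
#     SIPModuleMakefile(
#         configuration=cfg,
#         build_file="mybinding.sbf",
#         install_dir=cfg.default_mod_dir
#     ).generate()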
class ProgramMakefile(Makefile):
"""The class that represents a program Makefile.
"""
def __init__(self, configuration, build_file=None, install_dir=None,
console=0, qt=0, opengl=0, python=0, threaded=0, warnings=1,
debug=0, dir=None, makefile="Makefile", installs=None,
universal=None, arch=None, deployment_target=None):
"""Initialise an instance of a program Makefile.
build_file is the file containing the target specific information. If
it is a dictionary instead then its contents are validated.
install_dir is the directory the target will be installed in.
"""
Makefile.__init__(self, configuration, console, qt, opengl, python, threaded, warnings, debug, dir, makefile, installs, universal, arch, deployment_target)
self._install_dir = install_dir
self._manifest = ("embed_manifest_exe" in self.optional_list("CONFIG"))
self._target = None
if build_file:
self._build = self.parse_build_file(build_file)
else:
self._build = None
def build_command(self, source):
"""Create a command line that will build an executable. Returns a
tuple of the name of the executable and the command line.
source is the name of the source file.
"""
# The name of the executable.
self._target, _ = os.path.splitext(source)
if sys.platform in ("win32", "cygwin"):
exe = self._target + ".exe"
else:
exe = self._target
self.ready()
# The command line.
build = []
build.append(self.required_string("CXX"))
for a in self._arch.split():
build.append('-arch ' + a)
for f in self.optional_list("DEFINES"):
build.append("-D" + f)
for f in self.optional_list("INCDIR"):
build.append("-I" + _quote(f))
build.extend(self.optional_list("CXXFLAGS"))
# This is for Qt5.
build.extend(self.optional_list("CXXFLAGS_APP"))
# Borland requires all flags to precede all file names.
if self.generator != "BMAKE":
build.append(source)
if self.generator in ("MSVC", "MSVC.NET", "MSBUILD"):
build.append("-Fe")
build.append("/link")
libdir_prefix = "/LIBPATH:"
elif self.generator == "BMAKE":
build.append("-e" + exe)
libdir_prefix = "-L"
else:
build.append("-o")
build.append(exe)
libdir_prefix = "-L"
for ld in self.optional_list("LIBDIR"):
if sys.platform == "darwin" and self.config.qt_framework:
build.append("-F" + _quote(ld))
build.append(libdir_prefix + _quote(ld))
lflags = self.optional_list("LFLAGS")
# This is a huge hack demonstrating my lack of understanding of how the
# Borland compiler works.
if self.generator == "BMAKE":
blflags = []
for lf in lflags:
for f in lf.split():
# Tell the compiler to pass the flags to the linker.
if f[-1] == "-":
f = "-l-" + f[1:-1]
elif f[0] == "-":
f = "-l" + f[1:]
# Remove any explicit object files otherwise the compiler
# will complain that they can't be found, but they don't
# seem to be needed.
if f[-4:].lower() != ".obj":
blflags.append(f)
lflags = blflags
build.extend(lflags)
build.extend(self.optional_list("LIBS"))
if self.generator == "BMAKE":
build.append(source)
return (exe, ' '.join(build))
def finalise(self):
"""Finalise the macros for a program Makefile.
"""
if self.generator in ("MSVC", "MSVC.NET", "MSBUILD"):
self.LFLAGS.append("/INCREMENTAL:NO")
if self._manifest:
self._add_manifest(self._target)
if self.console:
lflags_console = "LFLAGS_CONSOLE"
else:
lflags_console = "LFLAGS_WINDOWS"
self.LFLAGS.extend(self.optional_list(lflags_console))
Makefile.finalise(self)
def generate_macros_and_rules(self, mfile):
"""Generate the macros and rules generation.
mfile is the file object.
"""
if not self._build:
raise ValueError("pass a filename as build_file when generating a Makefile")
target = self._build["target"]
if sys.platform in ("win32", "cygwin"):
target = target + ".exe"
mfile.write("TARGET = %s\n" % target)
mfile.write("OFILES = %s\n" % self._build["objects"])
mfile.write("HFILES = %s\n" % self._build["headers"])
mfile.write("\n")
Makefile.generate_macros_and_rules(self, mfile)
def generate_target_default(self, mfile):
"""Generate the default target.
mfile is the file object.
"""
# Do these first so that it's safe for a sub-class to append additional
# commands to the real target, but make sure the default is correct.
mfile.write("\nall: $(TARGET)\n")
mfile.write("\n$(OFILES): $(HFILES)\n")
for mf in self._build["moc_headers"].split():
root, _ = os.path.splitext(mf)
cpp = "moc_" + root + ".cpp"
if self._src_dir != self.dir:
mf = os.path.join(self._src_dir, mf)
mfile.write("\n%s: %s\n" % (cpp, mf))
mfile.write("\t$(MOC) -o %s %s\n" % (cpp, mf))
mfile.write("\n$(TARGET): $(OFILES)\n")
if self.generator in ("MSVC", "MSVC.NET", "MSBUILD"):
mfile.write("\t$(LINK) $(LFLAGS) /OUT:$(TARGET) @<<\n")
mfile.write("\t $(OFILES) $(LIBS)\n")
mfile.write("<<\n")
elif self.generator == "BMAKE":
mfile.write("\t$(LINK) @&&|\n")
mfile.write("\t$(LFLAGS) $(OFILES) ,$(TARGET),,$(LIBS),,\n")
mfile.write("|\n")
else:
mfile.write("\t$(LINK) $(LFLAGS) -o $(TARGET) $(OFILES) $(LIBS)\n")
if self._manifest:
mfile.write("\tmt -nologo -manifest $(TARGET).manifest -outputresource:$(TARGET);1\n")
def generate_target_install(self, mfile):
"""Generate the install target.
mfile is the file object.
"""
if self._install_dir is None:
self._install_dir = self.config.default_bin_dir
mfile.write("\ninstall: $(TARGET)\n")
self.install_file(mfile, "$(TARGET)", self._install_dir)
def generate_target_clean(self, mfile):
"""Generate the clean target.
mfile is the file object.
"""
mfile.write("\nclean:\n")
self.clean_build_file_objects(mfile, self._build)
if self._manifest:
mfile.write("\t-%s $(TARGET).manifest\n" % self.rm)
def _quote(s):
"""Return a string surrounded by double quotes it if contains a space.
s is the string.
"""
# On Qt5, paths often include forward slashes so convert them.
if sys.platform == "win32":
s = s.replace("/", "\\")
if s.find(" ") >= 0:
s = '"' + s + '"'
return s
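# Example of _quote() (illustrative only): a path containing a space gains
# surrounding double quotes; on win32 forward slashes are converted to
# backslashes first.
#
#     _quote("/usr/include")            # -> '/usr/include'
#     _quote("C:/Program Files/Qt")     # -> '"C:\\Program Files\\Qt"' on win32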
def version_to_string(version, parts=3):
""" Convert an n-part version number encoded as a hexadecimal value to a
string. version is the version number. Returns the string.
"""
part_list = [str((version >> 16) & 0xff)]
if parts > 1:
part_list.append(str((version >> 8) & 0xff))
if parts > 2:
part_list.append(str(version & 0xff))
return '.'.join(part_list)
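# Example (illustrative): versions are packed one byte per part, so the
# hexadecimal encodings used throughout this module expand as follows.
#
#     version_to_string(0x040702)             # -> "4.7.2"
#     version_to_string(0x050003, parts=2)    # -> "5.0"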
def read_version(filename, description, numdefine=None, strdefine=None):
"""Read the version information for a package from a file. The information
is specified as #defines of a numeric (hexadecimal or decimal) value and/or
a string value.
filename is the name of the file.
description is the descriptive name of the package.
numdefine is the name of the #define of the numeric version. It is ignored
if it is None.
strdefine is the name of the #define of the string version. It is ignored
if it is None.
Returns a tuple of the version as a number and as a string.
"""
need_num = numdefine is not None
need_str = strdefine is not None
vers = None
versstr = None
f = open(filename)
l = f.readline()
while l and (need_num or need_str):
wl = l.split()
if len(wl) >= 3 and wl[0] == "#define":
if need_num and wl[1] == numdefine:
v = wl[2]
if v[0:2] == "0x":
vers = int(v, 16)
else:
dec = int(v)
# Use integer division so the components can be shifted below
# (true division would give floats under Python 3).
maj = dec // 100
min = (dec % 100) // 10
bug = (dec % 10)
vers = (maj << 16) + (min << 8) + bug
need_num = 0
if need_str and wl[1] == strdefine:
# Take account of embedded spaces.
versstr = ' '.join(wl[2:])[1:-1]
need_str = 0
l = f.readline()
f.close()
if need_num or need_str:
error("The %s version number could not be determined by parsing %s." % (description, filename))
return (vers, versstr)
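# Example (illustrative): for a header file containing the two (made-up)
# #defines below, read_version() returns the tuple (0x040f02, "4.15.2").
#
#     #define MY_VERSION      0x040f02
#     #define MY_VERSION_STR  "4.15.2"
#
#     read_version("myversion.h", "MyPackage", "MY_VERSION", "MY_VERSION_STR")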
def create_content(cdict, macros=None):
"""Convert a dictionary to a string (typically to use as the content to a
call to create_config_module()). Dictionary values that are strings are
quoted. Dictionary values that are lists are converted to quoted strings.
dict is the dictionary.
macros is the optional dictionary of platform specific build macros.
"""
content = "_pkg_config = {\n"
keys = list(cdict.keys())
keys.sort()
# Format it nicely.
width = 0
for k in keys:
klen = len(k)
if width < klen:
width = klen
for k in keys:
val = cdict[k]
vtype = type(val)
delim = None
if val is None:
val = "None"
elif vtype == list:
val = ' '.join(val)
delim = "'"
elif vtype == int:
if k.find("version") >= 0:
# Assume it's a hexadecimal version number. It doesn't matter
# if it isn't, we are just trying to make it look pretty.
val = "0x%06x" % val
else:
val = str(val)
else:
val = str(val)
delim = "'"
if delim:
if "'" in val:
delim = "'''"
val = delim + val + delim
content = content + " '" + k + "':" + (" " * (width - len(k) + 2)) + val.replace("\\", "\\\\")
if k != keys[-1]:
content = content + ","
content = content + "\n"
content = content + "}\n\n"
# Format the optional macros.
content = content + "_default_macros = "
if macros:
content = content + "{\n"
names = list(macros.keys())
names.sort()
width = 0
for c in names:
clen = len(c)
if width < clen:
width = clen
for c in names:
if c == names[-1]:
sep = ""
else:
sep = ","
val = macros[c]
if "'" in val:
delim = "'''"
else:
delim = "'"
k = "'" + c + "':"
content = content + " %-*s %s%s%s%s\n" % (1 + width + 2, k, delim, val.replace("\\", "\\\\"), delim, sep)
content = content + "}\n"
else:
content = content + "None\n"
return content
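# Example (illustrative): create_content() renders a dictionary as the source
# of a _pkg_config dictionary, e.g.
#
#     create_content({"py_version": 0x030405, "sip_bin": "/usr/bin/sip"})
#
# produces (roughly):
#
#     _pkg_config = {
#         'py_version':  0x030405,
#         'sip_bin':     '/usr/bin/sip'
#     }
#
#     _default_macros = None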
def create_config_module(module, template, content, macros=None):
"""Create a configuration module by replacing "@" followed by
"SIP_CONFIGURATION" followed by "@" in a template file with a content
string.
module is the name of the module file.
template is the name of the template file.
content is the content string. If it is a dictionary it is first converted
to a string using create_content().
macros is an optional dictionary of platform specific build macros. It is
only used if create_content() is called to convert the content to a string.
"""
if type(content) == dict:
content = create_content(content, macros)
# Allow this file to be used as a template.
key = "@" + "SIP_CONFIGURATION" + "@"
df = open(module, "w")
sf = open(template, "r")
line = sf.readline()
while line:
if line.find(key) >= 0:
line = content
df.write(line)
line = sf.readline()
df.close()
sf.close()
def version_to_sip_tag(version, tags, description):
"""Convert a version number to a SIP tag.
version is the version number. If it is negative then the latest version
is assumed. (This is typically useful if a snapshot is indicated by a
negative version number.)
tags is the dictionary of tags keyed by version number. The tag used is
the one with the smallest key (ie. earliest version) that is greater than
the given version number.
description is the descriptive name of the package used for error messages.
Returns the corresponding tag.
"""
vl = list(tags.keys())
vl.sort()
# For a snapshot use the latest tag.
if version < 0:
tag = tags[vl[-1]]
else:
for v in vl:
if version < v:
tag = tags[v]
break
else:
error("Unsupported %s version: 0x%06x." % (description, version))
return tag
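# Example (illustrative, with made-up tag names): the tag chosen is the one
# keyed by the earliest version strictly greater than the given version, and
# a negative version selects the latest tag.
#
#     tags = {0x040200: "Qt_4_1_0", 0x050000: "Qt_4_2_0"}
#     version_to_sip_tag(0x040103, tags, "Qt")    # -> "Qt_4_1_0"
#     version_to_sip_tag(-1, tags, "Qt")          # -> "Qt_4_2_0"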
def error(msg):
"""Display an error message and terminate.
msg is the text of the error message.
"""
sys.stderr.write(format("Error: " + msg) + "\n")
sys.exit(1)
def inform(msg):
"""Display an information message.
msg is the text of the information message.
"""
sys.stdout.write(format(msg) + "\n")
def format(msg, leftmargin=0, rightmargin=78):
"""Format a message by inserting line breaks at appropriate places.
msg is the text of the message.
leftmargin is the position of the left margin.
rightmargin is the position of the right margin.
Return the formatted message.
"""
curs = leftmargin
fmsg = " " * leftmargin
for w in msg.split():
l = len(w)
if curs != leftmargin and curs + l > rightmargin:
fmsg = fmsg + "\n" + (" " * leftmargin)
curs = leftmargin
if curs > leftmargin:
fmsg = fmsg + " "
curs = curs + 1
fmsg = fmsg + w
curs = curs + l
return fmsg
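# Example (illustrative): format() does simple greedy word wrapping between
# the two margins, indenting every output line to the left margin.
#
#     format("a b c", leftmargin=2, rightmargin=4)    # -> "  a b\n  c"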
def parse_build_macros(filename, names, overrides=None, properties=None):
"""Parse a qmake compatible file of build system macros and convert it to a
dictionary. A macro is a name/value pair. The dictionary is returned or
None if any of the overrides was invalid.
filename is the name of the file to parse.
names is a list of the macro names to extract from the file.
overrides is an optional list of macro names and values that modify those
found in the file. They are of the form "name=value" (in which case the
value replaces the value found in the file) or "name+=value" (in which case
the value is appended to the value found in the file).
properties is an optional dictionary of property name and values that are
used to resolve any expressions of the form "$[name]" in the file.
"""
# Validate and convert the overrides to a dictionary.
orides = {}
if overrides is not None:
for oride in overrides:
prefix = ""
name_end = oride.find("+=")
if name_end >= 0:
prefix = "+"
val_start = name_end + 2
else:
name_end = oride.find("=")
if name_end >= 0:
val_start = name_end + 1
else:
return None
name = oride[:name_end]
if name not in names:
return None
orides[name] = prefix + oride[val_start:]
# This class defines a file like object that handles the nested include()
# directives in qmake files.
class qmake_build_file_reader:
def __init__(self, filename):
self.filename = filename
self.currentfile = None
self.filestack = []
self.pathstack = []
self.cond_fname = None
self._openfile(filename)
def _openfile(self, filename):
try:
f = open(filename, 'r')
except IOError:
# If this file is conditional then don't raise an error.
if self.cond_fname == filename:
return
error("Unable to open %s" % filename)
if self.currentfile:
self.filestack.append(self.currentfile)
self.pathstack.append(self.path)
self.currentfile = f
self.path = os.path.dirname(filename)
def readline(self):
line = self.currentfile.readline()
sline = line.strip()
if self.cond_fname and sline == '}':
# The current condition is closed.
self.cond_fname = None
line = self.currentfile.readline()
elif sline.startswith('exists(') and sline.endswith('{'):
# A new condition is opened so extract the filename.
self.cond_fname = self._normalise(sline[:-1].strip()[7:-1].strip())
line = self.currentfile.readline()
elif sline.startswith('include('):
nextfile = self._normalise(sline[8:-1].strip())
self._openfile(nextfile)
return self.readline()
if not line:
self.currentfile.close()
if self.filestack:
self.currentfile = self.filestack.pop()
self.path = self.pathstack.pop()
return self.readline()
return line
# Normalise a filename by expanding any environment variables and
# making sure it is absolute.
def _normalise(self, fname):
if "$(" in fname:
fname = os.path.normpath(self._expandvars(fname))
if not os.path.isabs(fname):
fname = os.path.join(self.path, fname)
return fname
# Expand the environment variables in a filename.
def _expandvars(self, fname):
i = 0
while True:
m = re.search(r'\$\((\w+)\)', fname[i:])
if not m:
break
i, j = m.span(0)
name = m.group(1)
if name in os.environ:
tail = fname[j:]
fname = fname[:i] + os.environ[name]
i = len(fname)
fname += tail
else:
i = j
return fname
f = qmake_build_file_reader(filename)
# Get everything into a dictionary.
raw = {
"DIR_SEPARATOR": os.sep,
"LITERAL_WHITESPACE": " ",
"LITERAL_DOLLAR": "$",
"LITERAL_HASH": "#"
}
line = f.readline()
while line:
# Handle line continuations.
while len(line) > 1 and line[-2] == "\\":
line = line[:-2]
next = f.readline()
if next:
line = line + next
else:
break
# Strip comments and surrounding whitespace.
line = line.split('#', 1)[0].strip()
if line:
assstart = line.find("+")
if assstart > 0 and line[assstart + 1] == '=':
adding = True
assend = assstart + 1
else:
adding = False
assstart = line.find("=")
assend = assstart
if assstart > 0:
lhs = line[:assstart].strip()
rhs = line[assend + 1:].strip()
# Remove the escapes for any quotes.
rhs = rhs.replace(r'\"', '"').replace(r"\'", "'")
| if adding and rhs != "": | 8,273 | lcc_e | python | null | 2dcbe3f1e795cf4752a0ce542c306fd7bd0fb436d4434e10 |